* [PATCH 1/3] powerpc: add NMI IPI infrastructure
2016-11-08 14:01 [PATCH 0/3][RFC] powerpc: NMI IPIs Nicholas Piggin
@ 2016-11-08 14:01 ` Nicholas Piggin
2016-11-08 20:35 ` kbuild test robot
2016-11-08 14:01 ` [PATCH 2/3] powerpc: add struct smp_ops_t.cause_nmi_ipi operation Nicholas Piggin
2016-11-08 14:01 ` [PATCH 3/3] powerpc/pseries: implement nmi ipi with H_SIGNAL_SYS_RESET Nicholas Piggin
2 siblings, 1 reply; 8+ messages in thread
From: Nicholas Piggin @ 2016-11-08 14:01 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Nicholas Piggin, Alistair Popple
Add an NMI IPI system that handles platform differences and concurrency
and reentrancy issues.
The platform does not have to implement a true non-maskable interrupt.
The default is to simply use the debugger break IPI message.
The debugger break users (debugger and crash) have been reimplemented
on top of the NMI.
smp_send_stop has also been implemented as an NMI.
---
arch/powerpc/include/asm/smp.h | 2 +-
arch/powerpc/kernel/smp.c | 271 ++++++++++++++++++++++++++------
arch/powerpc/platforms/cell/interrupt.c | 2 +-
arch/powerpc/platforms/ps3/smp.c | 4 +-
4 files changed, 227 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 0d02c11..055918d 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -115,7 +115,7 @@ extern int cpu_to_core_id(int cpu);
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
#define PPC_MSG_TICK_BROADCAST 2
-#define PPC_MSG_DEBUGGER_BREAK 3
+#define PPC_MSG_NMI_IPI_SAFE 3
/* This is only used by the powernv kernel */
#define PPC_MSG_RM_HOST_ACTION 4
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9c6f3fd..959d9dc 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -85,8 +85,6 @@ volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
-static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
-
/*
* Returns 1 if the specified cpu should be brought up during boot.
* Used to inhibit booting threads if they've been disabled or
@@ -157,17 +155,9 @@ static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t debug_ipi_action(int irq, void *data)
+static irqreturn_t nmi_ipi_safe_action(int irq, void *data)
{
- if (crash_ipi_function_ptr) {
- crash_ipi_function_ptr(get_irq_regs());
- return IRQ_HANDLED;
- }
-
-#ifdef CONFIG_DEBUGGER
- debugger_ipi(get_irq_regs());
-#endif /* CONFIG_DEBUGGER */
-
+ smp_handle_nmi_ipi(get_irq_regs());
return IRQ_HANDLED;
}
@@ -175,14 +165,14 @@ static irq_handler_t smp_ipi_action[] = {
[PPC_MSG_CALL_FUNCTION] = call_function_action,
[PPC_MSG_RESCHEDULE] = reschedule_action,
[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
- [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
+ [PPC_MSG_NMI_IPI_SAFE] = nmi_ipi_safe_action,
};
const char *smp_ipi_name[] = {
[PPC_MSG_CALL_FUNCTION] = "ipi call function",
[PPC_MSG_RESCHEDULE] = "ipi reschedule",
[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
- [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
+ [PPC_MSG_NMI_IPI_SAFE] = "nmi ipi",
};
/* optional function to request ipi, for controllers with >= 4 ipis */
@@ -190,14 +180,9 @@ int smp_request_message_ipi(int virq, int msg)
{
int err;
- if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
+ if (msg < 0 || msg > PPC_MSG_NMI_IPI_SAFE) {
return -EINVAL;
}
-#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
- if (msg == PPC_MSG_DEBUGGER_BREAK) {
- return 1;
- }
-#endif
err = request_irq(virq, smp_ipi_action[msg],
IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], NULL);
@@ -277,8 +262,8 @@ irqreturn_t smp_ipi_demux(void)
scheduler_ipi();
if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
tick_broadcast_ipi_handler();
- if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
- debug_ipi_action(0, NULL);
+ if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI_SAFE))
+ nmi_ipi_safe_action(0, NULL);
} while (info->messages);
return IRQ_HANDLED;
@@ -315,6 +300,210 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
+/*
+ * "NMI IPI" infrastructure. The entire NMI call runs single threaded and with
+ * interrupts disabled, to avoid deadlocks and make message passing simpler.
+ * It runs synchronously until all targets enter the nmi handler (or the caller
+ * times out and fails), at which point the caller is released. No new NMI can
+ * be initiated until targets exit the handler.
+ *
+ * In the case of a caller timeout but the IPI subsequently being taken on a
+ * target, there is some logic to gracefully ignore the IPI, however platform
+ * code may not be able to distinguish this from a different source of NMI,
+ * so it may end up doing something like entering a debug mode.
+ *
+ * The NMI can be "safe" or "hard". Safe should not be dangerous to the system,
+ * but may not always succeed in interrupting the target. Hard is more likely
+ * to succeed but may not be recoverable afterwards, so must be used carefully.
+ */
+#define NMI_IPI_ALLBUTSELF -1
+
+#define NMI_IPI_FUNCTION_STOP 1
+#define NMI_IPI_FUNCTION_DEBUG 2
+#define NMI_IPI_FUNCTION_CRASH 3
+
+static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
+static struct cpumask nmi_ipi_pending_mask;
+static int nmi_ipi_busy_count = 0;
+static int nmi_ipi_function = 0;
+static void *nmi_ipi_data = NULL;
+
+static void nmi_ipi_lock_start(unsigned long *flags)
+{
+ raw_local_irq_save(*flags);
+ hard_irq_disable();
+ while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+ raw_local_irq_restore(*flags);
+ cpu_relax();
+ raw_local_irq_save(*flags);
+ hard_irq_disable();
+ }
+}
+
+static void nmi_ipi_lock(void)
+{
+ while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+ cpu_relax();
+}
+
+static void nmi_ipi_unlock(void)
+{
+ smp_mb();
+ WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
+ atomic_set(&__nmi_ipi_lock, 0);
+}
+
+static void nmi_ipi_unlock_end(unsigned long *flags)
+{
+ nmi_ipi_unlock();
+ raw_local_irq_restore(*flags);
+}
+
+static void __noreturn stop_this_cpu(void)
+{
+ /* Remove this CPU */
+ set_cpu_online(smp_processor_id(), false);
+
+ local_irq_disable();
+ hard_irq_disable();
+ while (1)
+ cpu_relax();
+}
+
+/*
+ * Platform NMI handler calls this to ack
+ */
+int smp_handle_nmi_ipi(struct pt_regs *regs)
+{
+ unsigned long flags;
+ int me = raw_smp_processor_id();
+ int ret = 0;
+
+ /*
+ * Unexpected NMIs are possible here because the interrupt may not
+ * be able to distinguish NMI IPIs from other types of NMIs, or
+ * because the caller may have timed out.
+ */
+ nmi_ipi_lock_start(&flags);
+ if (!nmi_ipi_busy_count)
+ goto out;
+ if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
+ goto out;
+
+ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+ nmi_ipi_busy_count++;
+ nmi_ipi_unlock();
+
+ ret = 1;
+ if (nmi_ipi_function == NMI_IPI_FUNCTION_STOP) {
+ nmi_ipi_lock();
+ nmi_ipi_busy_count--;
+ nmi_ipi_unlock_end(&flags);
+
+ stop_this_cpu();
+
+#ifdef CONFIG_DEBUGGER
+ } else if (nmi_ipi_function == NMI_IPI_FUNCTION_DEBUG) {
+ debugger_ipi(regs);
+#endif
+
+#ifdef CONFIG_KEXEC
+ } else if (nmi_ipi_function == NMI_IPI_FUNCTION_CRASH) {
+ void (*crash_fn)(struct pt_regs *) = nmi_ipi_data;
+ crash_fn(regs);
+#endif
+
+ } else {
+ pr_warn("Unknown NMI IPI on cpu:%d\n", me);
+ }
+
+ nmi_ipi_lock();
+ nmi_ipi_busy_count--;
+out:
+ nmi_ipi_unlock_end(&flags);
+
+ return ret;
+}
+
+static void do_smp_send_nmi_ipi(int cpu)
+{
+ do_message_pass(cpu, PPC_MSG_NMI_IPI_SAFE);
+}
+
+/*
+ * - cpu is the target CPU, can be NMI_IPI_ALLBUTSELF. Must not be current CPU.
+ * - function is one of NMI_IPI_FUNCTION_.
+ * - data is for the function handler to use.
+ * - safe_udelay > 0 if the "safe" NMI is to be used, specifies delay before
+ * giving up waiting for targets to enter the handler.
+ * - hard_udelay > 0 similarly to use "hard" NMI. If both are > 0, safe is
+ * attempted first, then hard. If the platform does not support hard, then
+ * safe will be used.
+ */
+int smp_send_nmi_ipi(int cpu, int function, void *data,
+ u64 safe_udelay, u64 hard_udelay)
+{
+ unsigned long flags;
+ int c, me = raw_smp_processor_id();
+ int ret = 1;
+
+ BUG_ON(cpu == me);
+ BUG_ON(cpu < 0 && cpu != NMI_IPI_ALLBUTSELF);
+
+ if (unlikely(!smp_ops))
+ return 0;
+
+ /* Have no real NMI capability yet */
+ safe_udelay += hard_udelay;
+
+ get_online_cpus();
+
+ /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
+ nmi_ipi_lock_start(&flags);
+ while (nmi_ipi_busy_count) {
+ nmi_ipi_unlock_end(&flags);
+ cpu_relax();
+ nmi_ipi_lock_start(&flags);
+ }
+
+ nmi_ipi_function = function;
+ nmi_ipi_data = data;
+
+ if (cpu < 0) {
+ /* ALLBUTSELF */
+ cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
+ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+ } else {
+ /* cpumask starts clear */
+ cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
+ }
+ nmi_ipi_busy_count++;
+ nmi_ipi_unlock();
+
+ if (safe_udelay) {
+ for_each_cpu(c, &nmi_ipi_pending_mask)
+ do_smp_send_nmi_ipi(c);
+
+ do {
+ safe_udelay--;
+ udelay(1);
+ if (cpumask_empty(&nmi_ipi_pending_mask))
+ goto done;
+ } while (safe_udelay);
+ }
+
+ ret = 0; /* Could not gather all CPUs */
+ cpumask_clear(&nmi_ipi_pending_mask);
+done:
+ nmi_ipi_lock();
+ nmi_ipi_busy_count--;
+ nmi_ipi_unlock_end(&flags);
+
+ put_online_cpus();
+
+ return ret;
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -325,45 +514,31 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(void)
{
- int cpu;
- int me = raw_smp_processor_id();
-
- if (unlikely(!smp_ops))
- return;
-
- for_each_online_cpu(cpu)
- if (cpu != me)
- do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ smp_send_nmi_ipi(NMI_IPI_ALLBUTSELF,
+ NMI_IPI_FUNCTION_DEBUG, NULL,
+ 1000000, 0);
}
#endif
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
- crash_ipi_function_ptr = crash_ipi_callback;
- if (crash_ipi_callback) {
- mb();
- smp_send_debugger_break();
- }
+ smp_send_nmi_ipi(NMI_IPI_ALLBUTSELF,
+ NMI_IPI_FUNCTION_CRASH, crash_ipi_callback,
+ 0, 1000000);
}
#endif
-static void stop_this_cpu(void *dummy)
-{
- /* Remove this CPU */
- set_cpu_online(smp_processor_id(), false);
-
- local_irq_disable();
- while (1)
- ;
-}
-
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 0);
+ smp_send_nmi_ipi(NMI_IPI_ALLBUTSELF,
+ NMI_IPI_FUNCTION_STOP, NULL,
+ 0, 1000000);
+
+ stop_this_cpu();
}
struct thread_info *current_set[NR_CPUS];
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a6bbbab..fcc94c8 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -211,7 +211,7 @@ void iic_request_IPIs(void)
iic_request_ipi(PPC_MSG_CALL_FUNCTION);
iic_request_ipi(PPC_MSG_RESCHEDULE);
iic_request_ipi(PPC_MSG_TICK_BROADCAST);
- iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
+ iic_request_ipi(PPC_MSG_NMI_IPI_SAFE);
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 60154d0..5b1fb70 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -77,7 +77,7 @@ static void __init ps3_smp_probe(void)
BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
BUILD_BUG_ON(PPC_MSG_TICK_BROADCAST != 2);
- BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+ BUILD_BUG_ON(PPC_MSG_NMI_IPI_SAFE != 3);
for (i = 0; i < MSG_COUNT; i++) {
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
@@ -96,7 +96,7 @@ static void __init ps3_smp_probe(void)
ps3_register_ipi_irq(cpu, virqs[i]);
}
- ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+ ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_NMI_IPI_SAFE]);
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
--
2.10.2
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 2/3] powerpc: add struct smp_ops_t.cause_nmi_ipi operation
2016-11-08 14:01 [PATCH 0/3][RFC] powerpc: NMI IPIs Nicholas Piggin
2016-11-08 14:01 ` [PATCH 1/3] powerpc: add NMI IPI infrastructure Nicholas Piggin
@ 2016-11-08 14:01 ` Nicholas Piggin
2016-11-11 1:00 ` Alistair Popple
2016-11-08 14:01 ` [PATCH 3/3] powerpc/pseries: implement nmi ipi with H_SIGNAL_SYS_RESET Nicholas Piggin
2 siblings, 1 reply; 8+ messages in thread
From: Nicholas Piggin @ 2016-11-08 14:01 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Nicholas Piggin, Alistair Popple
Allow platforms to provide an NMI IPI, and wire that to the NMI
system.
---
arch/powerpc/include/asm/smp.h | 5 +++++
arch/powerpc/kernel/smp.c | 21 ++++++++++++++++-----
arch/powerpc/platforms/85xx/smp.c | 1 +
arch/powerpc/platforms/86xx/mpc86xx_smp.c | 1 +
arch/powerpc/platforms/chrp/smp.c | 1 +
arch/powerpc/platforms/powermac/smp.c | 1 +
arch/powerpc/platforms/powernv/smp.c | 1 +
arch/powerpc/platforms/pseries/smp.c | 1 +
8 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 055918d..15521c1 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -37,11 +37,15 @@ extern int cpu_to_chip_id(int cpu);
#ifdef CONFIG_SMP
+#define SMP_OP_NMI_TYPE_SAFE 1
+#define SMP_OP_NMI_TYPE_HARD 2
+
struct smp_ops_t {
void (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
void (*cause_ipi)(int cpu, unsigned long data);
#endif
+ int (*cause_nmi_ipi)(int cpu, int type);
void (*probe)(void);
int (*kick_cpu)(int nr);
void (*setup_cpu)(int nr);
@@ -53,6 +57,7 @@ struct smp_ops_t {
int (*cpu_bootable)(unsigned int nr);
};
+extern int smp_handle_nmi_ipi(struct pt_regs *regs);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 959d9dc..b3133e9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -425,8 +425,10 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
return ret;
}
-static void do_smp_send_nmi_ipi(int cpu)
+static void do_smp_send_nmi_ipi(int cpu, int type)
{
+ if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu, type))
+ return;
do_message_pass(cpu, PPC_MSG_NMI_IPI_SAFE);
}
@@ -453,9 +455,6 @@ int smp_send_nmi_ipi(int cpu, int function, void *data,
if (unlikely(!smp_ops))
return 0;
- /* Have no real NMI capability yet */
- safe_udelay += hard_udelay;
-
get_online_cpus();
/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
@@ -482,7 +481,7 @@ int smp_send_nmi_ipi(int cpu, int function, void *data,
if (safe_udelay) {
for_each_cpu(c, &nmi_ipi_pending_mask)
- do_smp_send_nmi_ipi(c);
+ do_smp_send_nmi_ipi(c, SMP_OP_NMI_TYPE_SAFE);
do {
safe_udelay--;
@@ -492,6 +491,18 @@ int smp_send_nmi_ipi(int cpu, int function, void *data,
} while (safe_udelay);
}
+ if (hard_udelay) {
+ for_each_cpu(c, &nmi_ipi_pending_mask)
+ do_smp_send_nmi_ipi(c, SMP_OP_NMI_TYPE_HARD);
+
+ do {
+ hard_udelay--;
+ udelay(1);
+ if (cpumask_empty(&nmi_ipi_pending_mask))
+ goto done;
+ } while (hard_udelay);
+ }
+
ret = 0; /* Could not gather all CPUs */
cpumask_clear(&nmi_ipi_pending_mask);
done:
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fe9f19e..cde7beb 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -343,6 +343,7 @@ static int smp_85xx_kick_cpu(int nr)
}
struct smp_ops_t smp_85xx_ops = {
+ .cause_nmi_ipi = NULL,
.kick_cpu = smp_85xx_kick_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index af09bae..020e84a 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -105,6 +105,7 @@ smp_86xx_setup_cpu(int cpu_nr)
struct smp_ops_t smp_86xx_ops = {
+ .cause_nmi_ipi = NULL,
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_86xx_kick_cpu,
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index b6c9a0d..1451504 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -44,6 +44,7 @@ static void smp_chrp_setup_cpu(int cpu_nr)
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
+ .cause_nmi_ipi = NULL,
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c9eb7d6..1d76e15 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -446,6 +446,7 @@ void __init smp_psurge_give_timebase(void)
struct smp_ops_t psurge_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = smp_psurge_cause_ipi,
+ .cause_nmi_ipi = NULL,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c789258..092ec1f 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -244,6 +244,7 @@ static int pnv_cpu_bootable(unsigned int nr)
static struct smp_ops_t pnv_smp_ops = {
.message_pass = smp_muxed_ipi_message_pass,
.cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
+ .cause_nmi_ipi = NULL,
.probe = xics_smp_probe,
.kick_cpu = pnv_smp_kick_cpu,
.setup_cpu = pnv_smp_setup_cpu,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index f6f83ae..0f6522c 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -209,6 +209,7 @@ static __init void pSeries_smp_probe(void)
static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
+ .cause_nmi_ipi = NULL,
.probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_setup_cpu,
--
2.10.2
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH 3/3] powerpc/pseries: implement nmi ipi with H_SIGNAL_SYS_RESET
2016-11-08 14:01 [PATCH 0/3][RFC] powerpc: NMI IPIs Nicholas Piggin
2016-11-08 14:01 ` [PATCH 1/3] powerpc: add NMI IPI infrastructure Nicholas Piggin
2016-11-08 14:01 ` [PATCH 2/3] powerpc: add struct smp_ops_t.cause_nmi_ipi operation Nicholas Piggin
@ 2016-11-08 14:01 ` Nicholas Piggin
2016-11-08 18:38 ` kbuild test robot
2 siblings, 1 reply; 8+ messages in thread
From: Nicholas Piggin @ 2016-11-08 14:01 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Nicholas Piggin, Alistair Popple
H_SIGNAL_SYS_RESET can provide a hard NMI (it is not recoverable if
raised when the target has MSR_RI clear).
This patch makes a couple of changes to the generic system_reset_exception
handler, which should be split out, and platforms defining a handler
audited.
---
arch/powerpc/kernel/traps.c | 7 +++++--
arch/powerpc/platforms/pseries/ras.c | 4 ++++
arch/powerpc/platforms/pseries/smp.c | 12 +++++++++++-
3 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 023a462..d1d7fd4 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -275,19 +275,22 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
void system_reset_exception(struct pt_regs *regs)
{
+ nmi_enter();
+
/* See if any machine dependent calls */
if (ppc_md.system_reset_exception) {
if (ppc_md.system_reset_exception(regs))
- return;
+ goto done;
}
die("System Reset", regs, SIGABRT);
+done:
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
- /* What should we do here? We could issue a shutdown or hard reset. */
+ nmi_exit();
}
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 904a677..bb70b26 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -386,6 +386,10 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
}
fwnmi_release_errinfo();
}
+
+ if (smp_handle_nmi_ipi(regs))
+ return 1;
+
return 0; /* need to perform reset */
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 0f6522c..4534c5a 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -196,6 +196,16 @@ static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
xics_cause_ipi(cpu, data);
}
+static int pseries_cause_nmi_ipi(int cpu, int type)
+{
+ if (type == SMP_OP_NMI_TYPE_HARD) {
+ if (plapr_signal_sys_reset(cpu) == H_SUCCESS)
+ return 1;
+ }
+
+ return 0;
+}
+
static __init void pSeries_smp_probe(void)
{
xics_smp_probe();
@@ -209,7 +219,7 @@ static __init void pSeries_smp_probe(void)
static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
- .cause_nmi_ipi = NULL,
+ .cause_nmi_ipi = pseries_cause_nmi_ipi,
.probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_setup_cpu,
--
2.10.2
^ permalink raw reply related [flat|nested] 8+ messages in thread