* [PATCH 1/7] printk: Hand over printing to console if printing too long
2015-10-26 4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
@ 2015-10-26 4:52 ` Jan Kara
2016-03-01 17:22 ` Denys Vlasenko
2015-10-26 4:52 ` [PATCH 2/7] printk: Start printing handover kthreads on demand Jan Kara
` (4 subsequent siblings)
5 siblings, 1 reply; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:52 UTC (permalink / raw)
To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara
From: Jan Kara <jack@suse.cz>
Currently, console_unlock() prints messages from the kernel printk buffer to
the console while the buffer is non-empty. When a serial console is attached,
printing is slow and thus other CPUs in the system have plenty of time to
append new messages to the buffer while one CPU is printing. Thus the CPU can
spend an unbounded amount of time doing printing in console_unlock(). This is
an especially serious problem if the printk() calling console_unlock() was
called with interrupts disabled.
In practice users have observed a CPU spending tens of seconds printing in
console_unlock() (usually during boot when hundreds of SCSI devices are
discovered), resulting in RCU stalls (the CPU doing the printing doesn't
reach a quiescent state for a long time), softlockup reports (IPIs for the
printing CPU don't get served and thus other CPUs are spinning waiting for
the printing CPU to process IPIs), and eventually machine death (as messages
from the stalls and lockups are appended to the printk buffer faster than we
are able to print them). So these machines are unable to boot with a serial
console attached. Also, during artificial stress testing, a SATA disk
disappears from the system because its interrupts aren't served for too
long.
This patch implements a mechanism where, after printing a specified number
of characters (tunable as a kernel parameter printk.offload_chars), the CPU
doing the printing asks for help by waking up one of the dedicated kthreads.
As soon as the printing CPU notices that a kthread got scheduled and is
spinning on print_lock, dedicated for that purpose, it drops console_sem and
print_lock and exits console_unlock(). The kthread then takes over printing
instead. This way no CPU should spend too long printing even if there is
heavy printk traffic.
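For orientation, a condensed sketch of the hand-over flow this patch adds
(simplified from the diff below; logbuf locking, the memory barriers pairing
with the spin counter, and the kthread's sleep/wakeup loop are omitted):

	/* Printing CPU, inside console_unlock()'s print loop: */
	if (printk_offload_chars && printed_chars >= printk_offload_chars &&
	    !oops_in_progress) {
		if (atomic_read(&printing_tasks_spinning)) {
			/* A kthread already spins on print_lock: drop
			 * console_sem and print_lock and bail out. */
			hand_over = true;
		} else {
			/* Ask a kthread for help; keep printing until
			 * it shows up. */
			wake_up(&print_queue);
		}
	}

	/* Printing kthread, once woken: */
	preempt_disable();
	atomic_inc(&printing_tasks_spinning);
	spin_lock_irqsave(&print_lock, flags);	/* wait for current printer */
	atomic_dec(&printing_tasks_spinning);
	spin_unlock_irqrestore(&print_lock, flags);
	if (console_trylock())
		console_unlock();		/* now we do the printing */
	preempt_enable();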
Signed-off-by: Jan Kara <jack@suse.cz>
---
Documentation/kernel-parameters.txt | 15 ++++
kernel/printk/printk.c | 173 ++++++++++++++++++++++++++++++++----
2 files changed, 171 insertions(+), 17 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 22a4b687ea5b..df8adee975ba 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2958,6 +2958,21 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
default: disabled
+ printk.offload_chars=
+ Printing to console can be relatively slow especially
+ in case of serial console. When there is intensive
+ printing happening from several cpus (as is the case
+ during boot), a cpu can be spending significant time
+ (seconds or more) doing printing. To avoid softlockups,
+ lost interrupts, and similar problems other cpus
+ will take over printing after the currently printing
+ cpu has printed 'printk.offload_chars' characters.
+ Higher value means possibly longer interrupt and other
+ latencies but lower overhead of printing due to handing
+ over of printing.
+ Format: <number> (0 = disabled)
+ default: 1000
+
printk.time= Show timing data prefixed to each printk message line
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8f0324ef72ab..1b26263edfa7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -46,6 +46,7 @@
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <linux/uio.h>
+#include <linux/kthread.h>
#include <asm/uaccess.h>
@@ -78,6 +79,29 @@ static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);
+/*
+ * This spinlock is taken when printing to console. It is used only so that
+ * we can spin on it when some other thread wants to take over printing to
+ * console.
+ */
+static DEFINE_SPINLOCK(print_lock);
+
+/*
+ * Number of printing threads spinning on print_lock. Can go away once
+ * spin_is_contended() is reliable.
+ */
+static atomic_t printing_tasks_spinning = ATOMIC_INIT(0);
+
+/*
+ * Number of kernel threads for offloading printing. We need at least two so
+ * that they can hand over printing from one to another one and thus switch
+ * CPUs.
+ */
+#define PRINTING_TASKS 2
+
+/* Wait queue printing kthreads sleep on when idle */
+static DECLARE_WAIT_QUEUE_HEAD(print_queue);
+
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
.name = "console_lock"
@@ -279,6 +303,18 @@ static u32 clear_idx;
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+/*
+ * How many characters can we print in one call of printk before asking
+ * other cpus to continue printing. 0 means infinity. Tunable via
+ * printk.offload_chars kernel parameter. Our default 1000 means about
+ * 0.1s maximum latency due to printing.
+ */
+static unsigned int __read_mostly printk_offload_chars = 1000;
+
+module_param_named(offload_chars, printk_offload_chars, uint,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(offload_chars, "offload printing to console to a different"
+ " cpu after this number of characters");
/* Return log buffer address */
char *log_buf_addr_get(void)
@@ -2208,15 +2244,41 @@ out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
+/*
+ * Returns true iff there is another cpu waiting to take over printing. This
+ * function also takes care of waking a printing kthread when we want to
+ * hand over printing to some other cpu.
+ */
+static bool cpu_stop_printing(int printed_chars)
+{
+ /* Oops? Print everything now to maximize chances user will see it */
+ if (oops_in_progress)
+ return false;
+ if (!printk_offload_chars || printed_chars < printk_offload_chars)
+ return false;
+ /*
+ * Make sure we load fresh value of printing_tasks_spinning. Matches
+ * the barrier in printing_task()
+ */
+ smp_rmb();
+ if (atomic_read(&printing_tasks_spinning))
+ return true;
+ wake_up(&print_queue);
+
+ return false;
+}
+
/**
* console_unlock - unlock the console system
*
* Releases the console_lock which the caller holds on the console system
* and the console driver list.
*
- * While the console_lock was held, console output may have been buffered
- * by printk(). If this is the case, console_unlock(); emits
- * the output prior to releasing the lock.
+ * While the console_lock was held, console output may have been buffered by
+ * printk(). If this is the case, console_unlock() emits the output prior to
+ * releasing the lock. However we need not write all the data in the buffer if
+ * we would hog the CPU for too long. In such case we try to hand over printing
+ * to a different cpu.
*
* If there is output waiting, we wake /dev/kmsg and syslog() users.
*
@@ -2230,6 +2292,8 @@ void console_unlock(void)
unsigned long flags;
bool wake_klogd = false;
bool retry;
+ bool hand_over = false;
+ int printed_chars = 0;
if (console_suspended) {
up_console_sem();
@@ -2241,13 +2305,20 @@ void console_unlock(void)
/* flush buffered message fragment immediately to console */
console_cont_flush(text, sizeof(text));
again:
+ retry = false;
+ spin_lock_irqsave(&print_lock, flags);
for (;;) {
struct printk_log *msg;
size_t ext_len = 0;
size_t len;
int level;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ if (cpu_stop_printing(printed_chars)) {
+ hand_over = true;
+ break;
+ }
+
+ raw_spin_lock(&logbuf_lock);
if (seen_seq != log_next_seq) {
wake_klogd = true;
seen_seq = log_next_seq;
@@ -2265,8 +2336,10 @@ again:
len = 0;
}
skip:
- if (console_seq == log_next_seq)
+ if (console_seq == log_next_seq) {
+ raw_spin_unlock(&logbuf_lock);
break;
+ }
msg = log_from_idx(console_idx);
if (msg->flags & LOG_NOCONS) {
@@ -2306,28 +2379,38 @@ skip:
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(level, ext_text, ext_len, text, len);
start_critical_timings();
- local_irq_restore(flags);
+ printed_chars += len;
}
- console_locked = 0;
/* Release the exclusive_console once it is used */
if (unlikely(exclusive_console))
exclusive_console = NULL;
- raw_spin_unlock(&logbuf_lock);
-
+ console_locked = 0;
up_console_sem();
+ if (!hand_over) {
+ /*
+ * Someone could have filled up the buffer again, so re-check
+ * if there's something to flush. We perform the check under
+ * print_lock to save one cli / sti pair in the fast path.
+ */
+ raw_spin_lock(&logbuf_lock);
+ retry = console_seq != log_next_seq;
+ raw_spin_unlock(&logbuf_lock);
+ }
+
/*
- * Someone could have filled up the buffer again, so re-check if there's
- * something to flush. In case we cannot trylock the console_sem again,
- * there's a new owner and the console_unlock() from them will do the
- * flush, no worries.
- */
- raw_spin_lock(&logbuf_lock);
- retry = console_seq != log_next_seq;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ * Release print_lock after console_sem so that printing_task()
+ * succeeds in getting console_sem (unless someone else takes it and
+ * then he'll be responsible for printing).
+ */
+ spin_unlock_irqrestore(&print_lock, flags);
+ /*
+ * In case we cannot trylock the console_sem again, there's a new owner
+ * and the console_unlock() from them will do the flush, no worries.
+ */
if (retry && console_trylock())
goto again;
@@ -2654,9 +2737,52 @@ int unregister_console(struct console *console)
}
EXPORT_SYMBOL(unregister_console);
+/* Kthread which takes over printing from a CPU which asks for help */
+static int printing_task(void *arg)
+{
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+
+ while (1) {
+ prepare_to_wait_exclusive(&print_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&print_queue, &wait);
+ if (kthread_should_stop())
+ break;
+ /*
+ * Disable preemption so that we do not get scheduled away from
+ * the CPU after we get print_lock and before we are finished
+ * with printing.
+ */
+ preempt_disable();
+ atomic_inc(&printing_tasks_spinning);
+ /*
+ * Store printing_tasks_spinning value before we spin. Matches
+ * the barrier in cpu_stop_printing().
+ */
+ smp_mb__after_atomic();
+ /*
+ * Wait for currently printing thread to complete. We spin on
+ * print_lock instead of waiting on console_sem since we don't
+ * want to sleep once we got scheduled to make sure we take
+ * over printing without depending on the scheduler.
+ */
+ spin_lock_irqsave(&print_lock, flags);
+ atomic_dec(&printing_tasks_spinning);
+ spin_unlock_irqrestore(&print_lock, flags);
+ if (console_trylock())
+ console_unlock();
+ preempt_enable();
+ }
+ return 0;
+}
+
static int __init printk_late_init(void)
{
struct console *con;
+ int i;
+ struct task_struct *task;
for_each_console(con) {
if (!keep_bootcon && con->flags & CON_BOOT) {
@@ -2664,6 +2790,19 @@ static int __init printk_late_init(void)
}
}
hotcpu_notifier(console_cpu_notify, 0);
+
+ /* Does any handover of printing have any sense? */
+ if (num_possible_cpus() <= 1)
+ return 0;
+
+ for (i = 0; i < PRINTING_TASKS; i++) {
+ task = kthread_run(printing_task, NULL, "print/%d", i);
+ if (IS_ERR(task)) {
+ pr_err("printk: Cannot create printing thread: %ld\n",
+ PTR_ERR(task));
+ }
+ }
+
return 0;
}
late_initcall(printk_late_init);
--
2.1.4
* Re: [PATCH 1/7] printk: Hand over printing to console if printing too long
2015-10-26 4:52 ` [PATCH 1/7] printk: Hand over printing to console if printing too long Jan Kara
@ 2016-03-01 17:22 ` Denys Vlasenko
2016-03-02 9:30 ` Jan Kara
0 siblings, 1 reply; 10+ messages in thread
From: Denys Vlasenko @ 2016-03-01 17:22 UTC (permalink / raw)
To: Jan Kara
Cc: Andrew Morton, LKML, pmladek, KY Srinivasan, Steven Rostedt,
Jan Kara
On Mon, Oct 26, 2015 at 5:52 AM, Jan Kara <jack@suse.com> wrote:
> This patch implements a mechanism where, after printing a specified number
> of characters (tunable as a kernel parameter printk.offload_chars), the CPU
> doing the printing asks for help by waking up one of the dedicated kthreads.
> As soon as the printing CPU notices that a kthread got scheduled and is
> spinning on print_lock, dedicated for that purpose, it drops console_sem and
> print_lock and exits console_unlock(). The kthread then takes over printing
> instead. This way no CPU should spend too long printing even if there is
> heavy printk traffic.
> +/*
> + * Number of kernel threads for offloading printing. We need at least two so
> + * that they can hand over printing from one to another one and thus switch
> + * CPUs.
> + */
> +#define PRINTING_TASKS 2
> +
> +/* Wait queue printing kthreads sleep on when idle */
> +static DECLARE_WAIT_QUEUE_HEAD(print_queue);
Having two tasks, not one, for printk-ing in the case
when console output is slow... sounds wasteful.
Can this be improved so that only one task is needed?
* Re: [PATCH 1/7] printk: Hand over printing to console if printing too long
2016-03-01 17:22 ` Denys Vlasenko
@ 2016-03-02 9:30 ` Jan Kara
0 siblings, 0 replies; 10+ messages in thread
From: Jan Kara @ 2016-03-02 9:30 UTC (permalink / raw)
To: Denys Vlasenko
Cc: Jan Kara, Andrew Morton, LKML, pmladek, KY Srinivasan,
Steven Rostedt, Jan Kara
On Tue 01-03-16 18:22:25, Denys Vlasenko wrote:
> On Mon, Oct 26, 2015 at 5:52 AM, Jan Kara <jack@suse.com> wrote:
> > This patch implements a mechanism where, after printing a specified number
> > of characters (tunable as a kernel parameter printk.offload_chars), the CPU
> > doing the printing asks for help by waking up one of the dedicated kthreads.
> > As soon as the printing CPU notices that a kthread got scheduled and is
> > spinning on print_lock, dedicated for that purpose, it drops console_sem and
> > print_lock and exits console_unlock(). The kthread then takes over printing
> > instead. This way no CPU should spend too long printing even if there is
> > heavy printk traffic.
>
>
> > +/*
> > + * Number of kernel threads for offloading printing. We need at least two so
> > + * that they can hand over printing from one to another one and thus switch
> > + * CPUs.
> > + */
> > +#define PRINTING_TASKS 2
> > +
> > +/* Wait queue printing kthreads sleep on when idle */
> > +static DECLARE_WAIT_QUEUE_HEAD(print_queue);
>
> Having two tasks, not one, for printk-ing in the case
> when console output is slow... sounds wasteful.
>
> Can this be improved so that only one task is needed?
Probably we'll go with a workqueue in the next version of the patch series.
But at least in this version two tasks are needed so that one task can hand
over printing to the other one and thus relieve the load on a CPU.
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
* [PATCH 2/7] printk: Start printing handover kthreads on demand
2015-10-26 4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
2015-10-26 4:52 ` [PATCH 1/7] printk: Hand over printing to console if printing too long Jan Kara
@ 2015-10-26 4:52 ` Jan Kara
2015-10-26 4:52 ` [PATCH 3/7] kernel: Avoid softlockups in stop_machine() during heavy printing Jan Kara
` (3 subsequent siblings)
5 siblings, 0 replies; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:52 UTC (permalink / raw)
To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara
From: Jan Kara <jack@suse.cz>
Start the kthreads for handing over printing only when printk.offload_chars
is set to a value > 0 (i.e., when print offloading gets enabled).
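The switch from module_param_named() to module_param_cb() is what makes the
on-demand start work: writes to the parameter now go through a custom setter
that creates the kthreads if needed. Condensed from the diff below (for the
built-in printk code the writable knob should end up under
/sys/module/printk/parameters/offload_chars, though that path is an
assumption of this note, not something stated in the patch):

	static int offload_chars_set(const char *val,
				     const struct kernel_param *kp)
	{
		int ret;

		/* Serialize against other writers and kthread creation */
		mutex_lock(&printk_kthread_mutex);
		ret = param_set_uint(val, kp);
		if (!ret)
			ret = printk_start_offload_kthreads();
		mutex_unlock(&printk_kthread_mutex);
		return ret;
	}

	static struct kernel_param_ops offload_chars_ops = {
		.set = offload_chars_set,
		.get = param_get_uint,
	};

	module_param_cb(offload_chars, &offload_chars_ops,
			&printk_offload_chars, S_IRUGO | S_IWUSR);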
Signed-off-by: Jan Kara <jack@suse.cz>
---
kernel/printk/printk.c | 78 +++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 64 insertions(+), 14 deletions(-)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 1b26263edfa7..b9bb4a7a6dff 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -98,6 +98,10 @@ static atomic_t printing_tasks_spinning = ATOMIC_INIT(0);
* CPUs.
*/
#define PRINTING_TASKS 2
+/* Pointers to printing kthreads */
+static struct task_struct *printing_kthread[PRINTING_TASKS];
+/* Serialization of changes to printk_offload_chars and kthread creation */
+static DEFINE_MUTEX(printk_kthread_mutex);
/* Wait queue printing kthreads sleep on when idle */
static DECLARE_WAIT_QUEUE_HEAD(print_queue);
@@ -303,6 +307,13 @@ static u32 clear_idx;
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+
+static int offload_chars_set(const char *val, const struct kernel_param *kp);
+static struct kernel_param_ops offload_chars_ops = {
+ .set = offload_chars_set,
+ .get = param_get_uint,
+};
+
/*
* How many characters can we print in one call of printk before asking
* other cpus to continue printing. 0 means infinity. Tunable via
@@ -311,7 +322,7 @@ static u32 log_buf_len = __LOG_BUF_LEN;
*/
static unsigned int __read_mostly printk_offload_chars = 1000;
-module_param_named(offload_chars, printk_offload_chars, uint,
+module_param_cb(offload_chars, &offload_chars_ops, &printk_offload_chars,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(offload_chars, "offload printing to console to a different"
" cpu after this number of characters");
@@ -2778,12 +2789,61 @@ static int printing_task(void *arg)
return 0;
}
-static int __init printk_late_init(void)
+static int printk_start_offload_kthreads(void)
{
- struct console *con;
int i;
struct task_struct *task;
+ /* Does handover of printing make any sense? */
+ if (printk_offload_chars == 0 || num_possible_cpus() <= 1)
+ return 0;
+ for (i = 0; i < PRINTING_TASKS; i++) {
+ if (printing_kthread[i])
+ continue;
+ task = kthread_run(printing_task, NULL, "print/%d", i);
+ if (IS_ERR(task))
+ goto out_err;
+ printing_kthread[i] = task;
+ }
+ return 0;
+out_err:
+ pr_err("printk: Cannot create printing thread: %ld\n", PTR_ERR(task));
+ /* Disable offloading if creating kthreads failed */
+ printk_offload_chars = 0;
+ return PTR_ERR(task);
+}
+
+static int offload_chars_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ /* Protect against parallel change of printk_offload_chars */
+ mutex_lock(&printk_kthread_mutex);
+ ret = param_set_uint(val, kp);
+ if (ret) {
+ mutex_unlock(&printk_kthread_mutex);
+ return ret;
+ }
+ ret = printk_start_offload_kthreads();
+ mutex_unlock(&printk_kthread_mutex);
+ return ret;
+}
+
+static void printk_offload_init(void)
+{
+ mutex_lock(&printk_kthread_mutex);
+ if (num_possible_cpus() <= 1) {
+ /* Offloading doesn't make sense. Disable print offloading. */
+ printk_offload_chars = 0;
+ } else
+ printk_start_offload_kthreads();
+ mutex_unlock(&printk_kthread_mutex);
+}
+
+static int __init printk_late_init(void)
+{
+ struct console *con;
+
for_each_console(con) {
if (!keep_bootcon && con->flags & CON_BOOT) {
unregister_console(con);
@@ -2791,17 +2851,7 @@ static int __init printk_late_init(void)
}
hotcpu_notifier(console_cpu_notify, 0);
- /* Does any handover of printing have any sense? */
- if (num_possible_cpus() <= 1)
- return 0;
-
- for (i = 0; i < PRINTING_TASKS; i++) {
- task = kthread_run(printing_task, NULL, "print/%d", i);
- if (IS_ERR(task)) {
- pr_err("printk: Cannot create printing thread: %ld\n",
- PTR_ERR(task));
- }
- }
+ printk_offload_init();
return 0;
}
--
2.1.4
* [PATCH 3/7] kernel: Avoid softlockups in stop_machine() during heavy printing
2015-10-26 4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
2015-10-26 4:52 ` [PATCH 1/7] printk: Hand over printing to console if printing too long Jan Kara
2015-10-26 4:52 ` [PATCH 2/7] printk: Start printing handover kthreads on demand Jan Kara
@ 2015-10-26 4:52 ` Jan Kara
2015-10-26 4:56 ` Jan Kara
2015-10-26 4:52 ` [PATCH 4/7] panic: Always flush printk buffer before panic Jan Kara
` (2 subsequent siblings)
5 siblings, 1 reply; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:52 UTC (permalink / raw)
To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara
From: Jan Kara <jack@suse.cz>
When there are lots of messages accumulated in the printk buffer, printing
them (especially over a serial console) can take a long time (tens of
seconds). stop_machine() will effectively make all cpus spin in
multi_cpu_stop() waiting for the CPU doing the printing to print all the
messages, which triggers the NMI softlockup watchdog and the RCU stall
detector, which add even more messages to print. Since the machine doesn't
do anything (except serve interrupts) during this time, network connections
are also dropped and other disturbances may happen.
Paper over the problem by waiting for the printk buffer to be empty before
starting to stop CPUs. In theory a burst of new messages can be appended to
the printk buffer before the CPUs enter multi_cpu_stop(), so this isn't a
100% solution, but it works OK in practice and I'm not aware of a reasonably
simple better solution.
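The helper itself boils down to a drain loop; roughly (the real code in the
diff below samples the sequence counters under logbuf_lock and also stops if
the console is suspended):

	/* printk_log_buf_drain(), condensed: */
	while (console_seq != log_next_seq) {
		/* Cycle console_sem: console_unlock() flushes whatever
		 * is queued in the printk buffer at this point. */
		console_lock();
		console_unlock();
	}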
Signed-off-by: Jan Kara <jack@suse.cz>
---
include/linux/console.h | 11 +++++++++++
kernel/printk/printk.c | 25 +++++++++++++++++++++++++
kernel/stop_machine.c | 9 +++++++++
3 files changed, 45 insertions(+)
diff --git a/include/linux/console.h b/include/linux/console.h
index bd194343c346..96da462cdfeb 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -150,6 +150,17 @@ extern int console_trylock(void);
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
+#ifdef CONFIG_SMP
+extern void printk_log_buf_drain(void);
+#else
+/*
+ * In non-SMP kernels there won't be much to drain so save some code for tiny
+ * kernels.
+ */
+static inline void printk_log_buf_drain(void)
+{
+}
+#endif
extern struct tty_driver *console_device(int *);
extern void console_stop(struct console *);
extern void console_start(struct console *);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b9bb4a7a6dff..8dc6c146d022 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2488,6 +2488,31 @@ struct tty_driver *console_device(int *index)
return driver;
}
+/* For non-SMP kernels this function isn't used and would be pointless anyway */
+#ifdef CONFIG_SMP
+/*
+ * Wait until all messages accumulated in the printk buffer are printed to
+ * console. Note that as soon as this function returns, new messages may be
+ * added to the printk buffer by other CPUs.
+ */
+void printk_log_buf_drain(void)
+{
+ bool retry;
+ unsigned long flags;
+
+ while (1) {
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ retry = console_seq != log_next_seq;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ if (!retry || console_suspended)
+ break;
+ /* Cycle console_sem to wait for outstanding printing */
+ console_lock();
+ console_unlock();
+ }
+}
+#endif
+
/*
* Prevent further output on the passed console device so that (for example)
* serial drivers can disable console output before suspending a port, and can
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 12484e5d5c88..e9496b4a3825 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -21,6 +21,7 @@
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>
+#include <linux/console.h>
/*
* Structure to determine completion condition and record errors. May
@@ -543,6 +544,14 @@ static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cp
return ret;
}
+ /*
+ * If there are lots of outstanding messages, printing them can take a
+ * long time and all cpus would be spinning waiting for the printing to
+ * finish thus triggering NMI watchdog, RCU lockups etc. Wait for the
+ * printing here to avoid these.
+ */
+ printk_log_buf_drain();
+
/* Set the initial state and stop all online cpus. */
set_state(&msdata, MULTI_STOP_PREPARE);
return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
--
2.1.4
* Re: [PATCH 3/7] kernel: Avoid softlockups in stop_machine() during heavy printing
2015-10-26 4:52 ` [PATCH 3/7] kernel: Avoid softlockups in stop_machine() during heavy printing Jan Kara
@ 2015-10-26 4:56 ` Jan Kara
0 siblings, 0 replies; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:56 UTC (permalink / raw)
To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara
Hmph, sorry for the x/7 numbering. Patch 7 was the debug patch which I
didn't send...
Honza
On Mon 26-10-15 05:52:46, Jan Kara wrote:
> From: Jan Kara <jack@suse.cz>
>
> When there are lots of messages accumulated in the printk buffer, printing
> them (especially over a serial console) can take a long time (tens of
> seconds). stop_machine() will effectively make all cpus spin in
> multi_cpu_stop() waiting for the CPU doing the printing to print all the
> messages, which triggers the NMI softlockup watchdog and the RCU stall
> detector, which add even more messages to print. Since the machine doesn't
> do anything (except serve interrupts) during this time, network connections
> are also dropped and other disturbances may happen.
>
> Paper over the problem by waiting for the printk buffer to be empty before
> starting to stop CPUs. In theory a burst of new messages can be appended to
> the printk buffer before the CPUs enter multi_cpu_stop(), so this isn't a
> 100% solution, but it works OK in practice and I'm not aware of a reasonably
> simple better solution.
>
> Signed-off-by: Jan Kara <jack@suse.cz>
> ---
> include/linux/console.h | 11 +++++++++++
> kernel/printk/printk.c | 25 +++++++++++++++++++++++++
> kernel/stop_machine.c | 9 +++++++++
> 3 files changed, 45 insertions(+)
>
> diff --git a/include/linux/console.h b/include/linux/console.h
> index bd194343c346..96da462cdfeb 100644
> --- a/include/linux/console.h
> +++ b/include/linux/console.h
> @@ -150,6 +150,17 @@ extern int console_trylock(void);
> extern void console_unlock(void);
> extern void console_conditional_schedule(void);
> extern void console_unblank(void);
> +#ifdef CONFIG_SMP
> +extern void printk_log_buf_drain(void);
> +#else
> +/*
> + * In non-SMP kernels there won't be much to drain so save some code for tiny
> + * kernels.
> + */
> +static inline void printk_log_buf_drain(void)
> +{
> +}
> +#endif
> extern struct tty_driver *console_device(int *);
> extern void console_stop(struct console *);
> extern void console_start(struct console *);
> diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
> index b9bb4a7a6dff..8dc6c146d022 100644
> --- a/kernel/printk/printk.c
> +++ b/kernel/printk/printk.c
> @@ -2488,6 +2488,31 @@ struct tty_driver *console_device(int *index)
> return driver;
> }
>
> +/* For non-SMP kernels this function isn't used and would be pointless anyway */
> +#ifdef CONFIG_SMP
> +/*
> + * Wait until all messages accumulated in the printk buffer are printed to
> + * console. Note that as soon as this function returns, new messages may be
> + * added to the printk buffer by other CPUs.
> + */
> +void printk_log_buf_drain(void)
> +{
> + bool retry;
> + unsigned long flags;
> +
> + while (1) {
> + raw_spin_lock_irqsave(&logbuf_lock, flags);
> + retry = console_seq != log_next_seq;
> + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
> + if (!retry || console_suspended)
> + break;
> + /* Cycle console_sem to wait for outstanding printing */
> + console_lock();
> + console_unlock();
> + }
> +}
> +#endif
> +
> /*
> * Prevent further output on the passed console device so that (for example)
> * serial drivers can disable console output before suspending a port, and can
> diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
> index 12484e5d5c88..e9496b4a3825 100644
> --- a/kernel/stop_machine.c
> +++ b/kernel/stop_machine.c
> @@ -21,6 +21,7 @@
> #include <linux/smpboot.h>
> #include <linux/atomic.h>
> #include <linux/lglock.h>
> +#include <linux/console.h>
>
> /*
> * Structure to determine completion condition and record errors. May
> @@ -543,6 +544,14 @@ static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cp
> return ret;
> }
>
> + /*
> + * If there are lots of outstanding messages, printing them can take a
> + * long time and all cpus would be spinning waiting for the printing to
> + * finish thus triggering NMI watchdog, RCU lockups etc. Wait for the
> + * printing here to avoid these.
> + */
> + printk_log_buf_drain();
> +
> /* Set the initial state and stop all online cpus. */
> set_state(&msdata, MULTI_STOP_PREPARE);
> return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
> --
> 2.1.4
>
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
* [PATCH 4/7] panic: Always flush printk buffer before panic
2015-10-26 4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
` (2 preceding siblings ...)
2015-10-26 4:52 ` [PATCH 3/7] kernel: Avoid softlockups in stop_machine() during heavy printing Jan Kara
@ 2015-10-26 4:52 ` Jan Kara
2015-10-26 4:52 ` [PATCH 5/7] printk: Add config option for disabling printk offloading Jan Kara
2015-10-26 4:52 ` [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU Jan Kara
5 siblings, 0 replies; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:52 UTC (permalink / raw)
To: Andrew Morton
Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara, Vitaly Kuznetsov
In some cases we may end up killing the CPU holding the console lock while
still having valuable data in the logbuf. E.g. Vitaly is observing the
following:
- A crash is happening on one CPU and console_unlock() is being called on
some other CPU.
- console_unlock() tries to print out the buffer before releasing the lock,
and on a slow console this takes time.
- in the meanwhile the crashing CPU does lots of printk()s with valuable
data (which go to the logbuf) and sends IPIs to all other CPUs.
- console_unlock() finishes printing the previous chunk and enables
interrupts before trying to print out the rest; the CPU catches the IPI and
never releases the console lock.
This is not the only possible case: in the VT/fb subsystems we have many
other console_lock()/console_unlock() users. Non-masked interrupts (or
receiving an NMI in case of extreme slowness) will have the same result.
Getting the whole console buffer printed out on a crash is a top priority,
so zap the printk locks and print the logbuf contents after all cpus have
been stopped.
Based on a patch by Vitaly Kuznetsov.
CC: Vitaly Kuznetsov <vkuznets@redhat.com>
Reported-and-tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Jan Kara <jack@suse.com>
---
include/linux/console.h | 4 ++--
kernel/panic.c | 8 ++++++++
kernel/printk/printk.c | 5 ++++-
kernel/stop_machine.c | 2 +-
4 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/include/linux/console.h b/include/linux/console.h
index 96da462cdfeb..f40084802f3f 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -151,13 +151,13 @@ extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
#ifdef CONFIG_SMP
-extern void printk_log_buf_drain(void);
+extern void printk_log_buf_drain(bool panic);
#else
/*
* In non-SMP kernels there won't be much to drain so save some code for tiny
* kernels.
*/
-static inline void printk_log_buf_drain(void)
+static inline void printk_log_buf_drain(bool panic)
{
}
#endif
diff --git a/kernel/panic.c b/kernel/panic.c
index 04e91ff7560b..d07ed830a9fb 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -23,6 +23,7 @@
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
+#include <linux/console.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
@@ -147,6 +148,13 @@ void panic(const char *fmt, ...)
bust_spinlocks(0);
+ /*
+ * We may have ended up stopping the CPU doing printing (in
+ * smp_send_stop()) while still having some valuable data in the
+ * console buffer. Flush it out.
+ */
+ printk_log_buf_drain(true);
+
if (!panic_blink)
panic_blink = no_blink;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8dc6c146d022..e404c429fe87 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2495,11 +2495,14 @@ struct tty_driver *console_device(int *index)
* console. Note that as soon as this function returns, new messages may be
* added to the printk buffer by other CPUs.
*/
-void printk_log_buf_drain(void)
+void printk_log_buf_drain(bool panic)
{
bool retry;
unsigned long flags;
+ if (panic)
+ zap_locks();
+
while (1) {
raw_spin_lock_irqsave(&logbuf_lock, flags);
retry = console_seq != log_next_seq;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index e9496b4a3825..50a03735893e 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -550,7 +550,7 @@ static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cp
* finish thus triggering NMI watchdog, RCU lockups etc. Wait for the
* printing here to avoid these.
*/
- printk_log_buf_drain();
+ printk_log_buf_drain(false);
/* Set the initial state and stop all online cpus. */
set_state(&msdata, MULTI_STOP_PREPARE);
--
2.1.4
* [PATCH 5/7] printk: Add config option for disabling printk offloading
2015-10-26 4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
` (3 preceding siblings ...)
2015-10-26 4:52 ` [PATCH 4/7] panic: Always flush printk buffer before panic Jan Kara
@ 2015-10-26 4:52 ` Jan Kara
2015-10-26 4:52 ` [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU Jan Kara
5 siblings, 0 replies; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:52 UTC (permalink / raw)
To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara
From: Jan Kara <jack@suse.cz>
The necessity for offloading of printing was observed only on large systems.
So add a config option (disabled by default); when it is not enabled, most
of the overhead added by this functionality is compiled out.
Signed-off-by: Jan Kara <jack@suse.cz>
---
Documentation/kernel-parameters.txt | 13 +++++++------
init/Kconfig | 14 ++++++++++++++
kernel/printk/printk.c | 35 +++++++++++++++++++++++++++++++++--
3 files changed, 54 insertions(+), 8 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index df8adee975ba..913c166fdfea 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2958,18 +2958,19 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
default: disabled
- printk.offload_chars=
+ printk.offload_chars= [KNL]
Printing to console can be relatively slow especially
in case of serial console. When there is intensive
printing happening from several cpus (as is the case
during boot), a cpu can be spending significant time
(seconds or more) doing printing. To avoid softlockups,
lost interrupts, and similar problems other cpus
- will take over printing after the currently printing
- cpu has printed 'printk.offload_chars' characters.
- Higher value means possibly longer interrupt and other
- latencies but lower overhead of printing due to handing
- over of printing.
+ will take over printing (if CONFIG_PRINTK_OFFLOAD=y)
+ after the currently printing cpu has printed
+ 'printk.offload_chars' characters. Higher value means
+ possibly longer interrupt and other latencies but
+ lower overhead of printing due to handing over of
+ printing.
Format: <number> (0 = disabled)
default: 1000
diff --git a/init/Kconfig b/init/Kconfig
index c24b6f767bf0..fa9749da5fc8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1456,6 +1456,20 @@ config PRINTK
very difficult to diagnose system problems, saying N here is
strongly discouraged.
+config PRINTK_OFFLOAD
+ default n
+ bool "Enable support for offloading printing to different CPU"
+ depends on PRINTK && SMP
+ help
+ Printing to console can be relatively slow especially in case of
+ serial console. On large machines when there is intensive printing
+ happening from several cpus (as is the case during boot), a cpu can
+ be spending significant time (seconds or more) doing printing. To
+ avoid softlockups, lost interrupts, and similar problems other cpus
+ will take over printing after the currently printing cpu has printed
+ certain number of characters (tunable via 'printk.offload_chars'
+ kernel parameter).
+
config BUG
bool "BUG() support" if EXPERT
default y
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e404c429fe87..5153c6518b9d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -79,6 +79,7 @@ static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);
+#ifdef CONFIG_PRINTK_OFFLOAD
/*
* This spinlock is taken when printing to console. It is used only so that
* we can spin on it when some other thread wants to take over printing to
@@ -105,6 +106,7 @@ static DEFINE_MUTEX(printk_kthread_mutex);
/* Wait queue printing kthreads sleep on when idle */
static DECLARE_WAIT_QUEUE_HEAD(print_queue);
+#endif /* CONFIG_PRINTK_OFFLOAD */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
@@ -308,6 +310,7 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+#ifdef CONFIG_PRINTK_OFFLOAD
static int offload_chars_set(const char *val, const struct kernel_param *kp);
static struct kernel_param_ops offload_chars_ops = {
.set = offload_chars_set,
@@ -326,6 +329,7 @@ module_param_cb(offload_chars, &offload_chars_ops, &printk_offload_chars,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(offload_chars, "offload printing to console to a different"
" cpu after this number of characters");
+#endif
/* Return log buffer address */
char *log_buf_addr_get(void)
@@ -2255,6 +2259,7 @@ out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
+#ifdef CONFIG_PRINTK_OFFLOAD
/*
* Returns true iff there is another cpu waiting to take over printing. This
* function also takes care of waking a printing kthread when we want to
* hand over printing to some other cpu.
return false;
}
+#define spin_lock_print_lock(flags) spin_lock_irqsave(&print_lock, flags)
+
+#define spin_unlock_print_lock(flags) spin_unlock_irqrestore(&print_lock, flags)
+
+#else
+
+static bool cpu_stop_printing(int printed_chars)
+{
+ return false;
+}
+
+#define spin_lock_print_lock(flags) local_irq_save(flags)
+
+#define spin_unlock_print_lock(flags) local_irq_restore(flags)
+
+#endif
+
/**
* console_unlock - unlock the console system
*
@@ -2317,7 +2339,7 @@ void console_unlock(void)
console_cont_flush(text, sizeof(text));
again:
retry = false;
- spin_lock_irqsave(&print_lock, flags);
+ spin_lock_print_lock(flags);
for (;;) {
struct printk_log *msg;
size_t ext_len = 0;
@@ -2416,7 +2438,7 @@ skip:
* succeeds in getting console_sem (unless someone else takes it and
* then he'll be responsible for printing).
*/
- spin_unlock_irqrestore(&print_lock, flags);
+ spin_unlock_print_lock(flags);
/*
* In case we cannot trylock the console_sem again, there's a new owner
@@ -2776,6 +2798,7 @@ int unregister_console(struct console *console)
}
EXPORT_SYMBOL(unregister_console);
+#ifdef CONFIG_PRINTK_OFFLOAD
/* Kthread which takes over printing from a CPU which asks for help */
static int printing_task(void *arg)
{
@@ -2868,6 +2891,14 @@ static void printk_offload_init(void)
mutex_unlock(&printk_kthread_mutex);
}
+#else /* CONFIG_PRINTK_OFFLOAD */
+
+static void printk_offload_init(void)
+{
+}
+
+#endif /* CONFIG_PRINTK_OFFLOAD */
+
static int __init printk_late_init(void)
{
struct console *con;
--
2.1.4
* [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU
2015-10-26 4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
` (4 preceding siblings ...)
2015-10-26 4:52 ` [PATCH 5/7] printk: Add config option for disabling printk offloading Jan Kara
@ 2015-10-26 4:52 ` Jan Kara
5 siblings, 0 replies; 10+ messages in thread
From: Jan Kara @ 2015-10-26 4:52 UTC (permalink / raw)
To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara
Currently nothing prevents the scheduler from scheduling a printing kthread
on the same CPU that is currently doing the printing. In fact, in some KVM
configurations this seems to happen rather frequently, and it defeats
printing offloading: the current CPU keeps printing and watching for the
printing kthread to come and take over, but that never happens because the
kthread has been scheduled on the very same CPU.
Fix the problem by allowing each printing kthread to be scheduled only on a
subset of CPUs; these subsets are disjoint, so at least one of the kthreads
is guaranteed to be able to take over printing. CPU hotplug makes this more
difficult than it should be, but we cope by redistributing kthreads among
CPUs when some kthread is not able to run anywhere.
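As a quick worked example of the split done by distribute_printing_kthreads()
in the diff below (an illustration, not extra code in the patch): with 8
online CPUs and PRINTING_TASKS == 2,

	cpus_per_thread = DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS)
			= DIV_ROUND_UP(8, 2) = 4;

so CPUs 0-3 form the allowed mask of print/0 and CPUs 4-7 that of print/1.
Whichever kthread is not allowed on the currently printing CPU can then be
woken on some other CPU and take over printing.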
Signed-off-by: Jan Kara <jack@suse.com>
---
kernel/printk/printk.c | 105 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 96 insertions(+), 9 deletions(-)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5153c6518b9d..72334ed42942 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -101,8 +101,10 @@ static atomic_t printing_tasks_spinning = ATOMIC_INIT(0);
#define PRINTING_TASKS 2
/* Pointers to printing kthreads */
static struct task_struct *printing_kthread[PRINTING_TASKS];
+/* Masks of cpus allowed for printing kthreads */
+static struct cpumask *printing_kthread_mask[PRINTING_TASKS];
/* Serialization of changes to printk_offload_chars and kthread creation */
-static DEFINE_MUTEX(printk_kthread_mutex);
+static DEFINE_MUTEX(printing_kthread_mutex);
/* Wait queue printing kthreads sleep on when idle */
static DECLARE_WAIT_QUEUE_HEAD(print_queue);
@@ -2840,28 +2842,113 @@ static int printing_task(void *arg)
return 0;
}
+/* Divide online cpus among printing kthreads */
+static void distribute_printing_kthreads(void)
+{
+ int i;
+ unsigned int cpus_per_thread;
+ unsigned int cpu, seen_cpu;
+
+ for (i = 0; i < PRINTING_TASKS; i++)
+ cpumask_clear(printing_kthread_mask[i]);
+
+ cpus_per_thread = DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS);
+ seen_cpu = 0;
+ for_each_online_cpu(cpu) {
+ cpumask_set_cpu(cpu,
+ printing_kthread_mask[seen_cpu / cpus_per_thread]);
+ seen_cpu++;
+ }
+
+ for (i = 0; i < PRINTING_TASKS; i++)
+ if (!cpumask_empty(printing_kthread_mask[i]))
+ set_cpus_allowed_ptr(printing_kthread[i],
+ printing_kthread_mask[i]);
+}
+
+static int printing_kthread_cpu_notify(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int i;
+
+ if (printk_offload_chars == 0)
+ goto out;
+
+ /* Get exclusion against turning of printk offload off... */
+ mutex_lock(&printing_kthread_mutex);
+ /* Now a reliable check if printk offload is enabled */
+ if (printk_offload_chars == 0) {
+ mutex_unlock(&printing_kthread_mutex);
+ goto out;
+ }
+
+ if (action == CPU_ONLINE) {
+ /*
+ * Allow some task to use the CPU. We don't want to spend too
+ * much time with fair distribution so just guess. We do a fair
+ * redistribution if some task has no cpu to run on.
+ */
+ i = cpu % PRINTING_TASKS;
+ cpumask_set_cpu(cpu, printing_kthread_mask[i]);
+ set_cpus_allowed_ptr(printing_kthread[i],
+ printing_kthread_mask[i]);
+ }
+ if (action == CPU_DEAD) {
+
+ for (i = 0; i < PRINTING_TASKS; i++) {
+ if (cpumask_test_cpu(cpu, printing_kthread_mask[i])) {
+ cpumask_clear_cpu(cpu,
+ printing_kthread_mask[i]);
+ if (cpumask_empty(printing_kthread_mask[i]))
+ distribute_printing_kthreads();
+ break;
+ }
+ }
+ }
+ mutex_unlock(&printing_kthread_mutex);
+out:
+ return NOTIFY_OK;
+}
+
static int printk_start_offload_kthreads(void)
{
int i;
struct task_struct *task;
+ int ret;
/* Does handover of printing make any sense? */
if (printk_offload_chars == 0 || num_possible_cpus() <= 1)
return 0;
+
for (i = 0; i < PRINTING_TASKS; i++) {
if (printing_kthread[i])
continue;
+ printing_kthread_mask[i] = kmalloc(cpumask_size(), GFP_KERNEL);
+ if (!printing_kthread_mask[i]) {
+ pr_err("printk: Cannot allocate cpumask for printing "
+ "thread.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
task = kthread_run(printing_task, NULL, "print/%d", i);
- if (IS_ERR(task))
+ if (IS_ERR(task)) {
+ kfree(printing_kthread_mask[i]);
+ pr_err("printk: Cannot create printing thread: %ld\n",
+ PTR_ERR(task));
+ ret = PTR_ERR(task);
goto out_err;
+ }
printing_kthread[i] = task;
}
+
+ hotcpu_notifier(printing_kthread_cpu_notify, 0);
+ distribute_printing_kthreads();
return 0;
out_err:
- pr_err("printk: Cannot create printing thread: %ld\n", PTR_ERR(task));
/* Disable offloading if creating kthreads failed */
printk_offload_chars = 0;
- return PTR_ERR(task);
+ return ret;
}
static int offload_chars_set(const char *val, const struct kernel_param *kp)
@@ -2869,26 +2956,26 @@ static int offload_chars_set(const char *val, const struct kernel_param *kp)
int ret;
/* Protect against parallel change of printk_offload_chars */
- mutex_lock(&printk_kthread_mutex);
+ mutex_lock(&printing_kthread_mutex);
ret = param_set_uint(val, kp);
if (ret) {
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
return ret;
}
ret = printk_start_offload_kthreads();
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
return ret;
}
static void printk_offload_init(void)
{
- mutex_lock(&printk_kthread_mutex);
+ mutex_lock(&printing_kthread_mutex);
if (num_possible_cpus() <= 1) {
/* Offloading doesn't make sense. Disable print offloading. */
printk_offload_chars = 0;
} else
printk_start_offload_kthreads();
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
}
#else /* CONFIG_PRINTK_OFFLOAD */
--
2.1.4