From: Jan Kara <jack@suse.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: LKML <linux-kernel@vger.kernel.org>,
pmladek@suse.com, KY Srinivasan <kys@microsoft.com>,
rostedt@goodmis.org, Jan Kara <jack@suse.com>
Subject: [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU
Date: Mon, 26 Oct 2015 05:52:49 +0100
Message-ID: <1445835169-8203-7-git-send-email-jack@suse.com>
In-Reply-To: <1445835169-8203-1-git-send-email-jack@suse.com>

Currently nothing prevents the scheduler from placing a printing kthread on
the same CPU that is currently doing the printing. In some KVM
configurations this actually happens rather frequently, and it defeats
printing offload: the current CPU keeps printing while waiting for a
printing kthread to come and take over, but the takeover never happens
because that kthread has been scheduled on the very same CPU.

Fix the problem by allowing each printing kthread to run only on a subset
of CPUs and making these subsets disjoint, so that at least one of the
kthreads is guaranteed to be able to take over printing. CPU hotplug makes
this more complicated than it should be, but we cope by redistributing the
kthreads among CPUs whenever some kthread ends up with no CPU it can run on.

Signed-off-by: Jan Kara <jack@suse.com>
---
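[Note, not part of the patch: below is a minimal standalone sketch of the
partitioning idea used by distribute_printing_kthreads(). It deliberately
avoids the kernel cpumask API; NUM_TASKS, NR_CPUS and the online[] array are
hypothetical stand-ins for PRINTING_TASKS, the machine size and the online
CPU mask. Online CPUs are handed out in consecutive blocks, so the
per-kthread sets end up disjoint.]

/*
 * Standalone userspace illustration of the CPU partitioning only.
 * NUM_TASKS stands in for PRINTING_TASKS, online[] for the online cpumask.
 */
#include <stdio.h>

#define NUM_TASKS 2	/* stand-in for PRINTING_TASKS */
#define NR_CPUS   8	/* hypothetical machine size */

int main(void)
{
	/* Hypothetical online map: CPUs 1 and 5 are offline. */
	int online[NR_CPUS] = { 1, 0, 1, 1, 1, 0, 1, 1 };
	int mask[NUM_TASKS][NR_CPUS] = { { 0 } };
	int num_online = 0, cpus_per_thread, seen_cpu = 0, cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		num_online += online[cpu];

	/* Same rounding as DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS). */
	cpus_per_thread = (num_online + NUM_TASKS - 1) / NUM_TASKS;

	/* Walk online CPUs; each one goes into exactly one per-thread mask. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!online[cpu])
			continue;
		mask[seen_cpu / cpus_per_thread][cpu] = 1;
		seen_cpu++;
	}

	for (i = 0; i < NUM_TASKS; i++) {
		printf("print/%d may run on:", i);
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (mask[i][cpu])
				printf(" %d", cpu);
		printf("\n");
	}
	return 0;
}

[Because the sets are disjoint, the CPU that is currently printing can appear
in at most one of them, so as long as more than one CPU is online the other
kthread always has a different CPU it is allowed to run on.]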
kernel/printk/printk.c | 105 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 96 insertions(+), 9 deletions(-)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5153c6518b9d..72334ed42942 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -101,8 +101,10 @@ static atomic_t printing_tasks_spinning = ATOMIC_INIT(0);
#define PRINTING_TASKS 2
/* Pointers to printing kthreads */
static struct task_struct *printing_kthread[PRINTING_TASKS];
+/* Masks of cpus allowed for printing kthreads */
+static struct cpumask *printing_kthread_mask[PRINTING_TASKS];
/* Serialization of changes to printk_offload_chars and kthread creation */
-static DEFINE_MUTEX(printk_kthread_mutex);
+static DEFINE_MUTEX(printing_kthread_mutex);
/* Wait queue printing kthreads sleep on when idle */
static DECLARE_WAIT_QUEUE_HEAD(print_queue);
@@ -2840,28 +2842,113 @@ static int printing_task(void *arg)
return 0;
}
+/* Divide online cpus among printing kthreads */
+static void distribute_printing_kthreads(void)
+{
+ int i;
+ unsigned int cpus_per_thread;
+ unsigned int cpu, seen_cpu;
+
+ for (i = 0; i < PRINTING_TASKS; i++)
+ cpumask_clear(printing_kthread_mask[i]);
+
+ cpus_per_thread = DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS);
+ seen_cpu = 0;
+ for_each_online_cpu(cpu) {
+ cpumask_set_cpu(cpu,
+ printing_kthread_mask[seen_cpu / cpus_per_thread]);
+ seen_cpu++;
+ }
+
+ for (i = 0; i < PRINTING_TASKS; i++)
+ if (!cpumask_empty(printing_kthread_mask[i]))
+ set_cpus_allowed_ptr(printing_kthread[i],
+ printing_kthread_mask[i]);
+}
+
+static int printing_kthread_cpu_notify(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int i;
+
+ if (printk_offload_chars == 0)
+ goto out;
+
+ /* Get exclusion against turning printk offload off... */
+ mutex_lock(&printing_kthread_mutex);
+ /* Now a reliable check if printk offload is enabled */
+ if (printk_offload_chars == 0) {
+ mutex_unlock(&printing_kthread_mutex);
+ goto out;
+ }
+
+ if (action == CPU_ONLINE) {
+ /*
+ * Allow some task to use the CPU. We don't want to spend too
+ * much time with fair distribution so just guess. We do a fair
+ * redistribution if some task has no cpu to run on.
+ */
+ i = cpu % PRINTING_TASKS;
+ cpumask_set_cpu(cpu, printing_kthread_mask[i]);
+ set_cpus_allowed_ptr(printing_kthread[i],
+ printing_kthread_mask[i]);
+ }
+ if (action == CPU_DEAD) {
+
+ for (i = 0; i < PRINTING_TASKS; i++) {
+ if (cpumask_test_cpu(cpu, printing_kthread_mask[i])) {
+ cpumask_clear_cpu(cpu,
+ printing_kthread_mask[i]);
+ if (cpumask_empty(printing_kthread_mask[i]))
+ distribute_printing_kthreads();
+ break;
+ }
+ }
+ }
+ mutex_unlock(&printing_kthread_mutex);
+out:
+ return NOTIFY_OK;
+}
+
static int printk_start_offload_kthreads(void)
{
int i;
struct task_struct *task;
+ int ret;
/* Does handover of printing make any sense? */
if (printk_offload_chars == 0 || num_possible_cpus() <= 1)
return 0;
+
for (i = 0; i < PRINTING_TASKS; i++) {
if (printing_kthread[i])
continue;
+ printing_kthread_mask[i] = kmalloc(cpumask_size(), GFP_KERNEL);
+ if (!printing_kthread_mask[i]) {
+ pr_err("printk: Cannot allocate cpumask for printing "
+ "thread.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
task = kthread_run(printing_task, NULL, "print/%d", i);
- if (IS_ERR(task))
+ if (IS_ERR(task)) {
+ kfree(printing_kthread_mask[i]);
+ pr_err("printk: Cannot create printing thread: %ld\n",
+ PTR_ERR(task));
+ ret = PTR_ERR(task);
goto out_err;
+ }
printing_kthread[i] = task;
}
+
+ hotcpu_notifier(printing_kthread_cpu_notify, 0);
+ distribute_printing_kthreads();
return 0;
out_err:
- pr_err("printk: Cannot create printing thread: %ld\n", PTR_ERR(task));
/* Disable offloading if creating kthreads failed */
printk_offload_chars = 0;
- return PTR_ERR(task);
+ return ret;
}
static int offload_chars_set(const char *val, const struct kernel_param *kp)
@@ -2869,26 +2956,26 @@ static int offload_chars_set(const char *val, const struct kernel_param *kp)
int ret;
/* Protect against parallel change of printk_offload_chars */
- mutex_lock(&printk_kthread_mutex);
+ mutex_lock(&printing_kthread_mutex);
ret = param_set_uint(val, kp);
if (ret) {
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
return ret;
}
ret = printk_start_offload_kthreads();
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
return ret;
}
static void printk_offload_init(void)
{
- mutex_lock(&printk_kthread_mutex);
+ mutex_lock(&printing_kthread_mutex);
if (num_possible_cpus() <= 1) {
/* Offloading doesn't make sense. Disable print offloading. */
printk_offload_chars = 0;
} else
printk_start_offload_kthreads();
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
}
#else /* CONFIG_PRINTK_OFFLOAD */
--
2.1.4