linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU
  2015-10-26  4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
@ 2015-10-26  4:52 ` Jan Kara
  0 siblings, 0 replies; 2+ messages in thread
From: Jan Kara @ 2015-10-26  4:52 UTC (permalink / raw)
  To: Andrew Morton; +Cc: LKML, pmladek, KY Srinivasan, rostedt, Jan Kara

Currently nothing forces the scheduler to schedule printing kthread on
the same CPU that is currently doing printing. In fact in some KVM
configurations this seems to happen rather frequently and it defeats
printing offloading since the current CPU is doing printing and watching
for printing kthread to come and take over however that never happens
because that kthread has been scheduled on the very same CPU.

Fix the problem by allowing each printing kthread to be scheduled only
on a subset of CPUs and these subsets are disjoint so at least one of
the kthreads is guaranteed to be able to take over printing. CPU hotplug
makes this more difficult than it should be but we cope by
redistributing kthreads among CPUs when some kthread is not able to run
anywhere.

Signed-off-by: Jan Kara <jack@suse.com>
---
 kernel/printk/printk.c | 105 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 96 insertions(+), 9 deletions(-)

diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5153c6518b9d..72334ed42942 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -101,8 +101,10 @@ static atomic_t printing_tasks_spinning = ATOMIC_INIT(0);
 #define PRINTING_TASKS 2
 /* Pointers to printing kthreads */
 static struct task_struct *printing_kthread[PRINTING_TASKS];
+/* Masks of cpus allowed for printing kthreads */
+static struct cpumask *printing_kthread_mask[PRINTING_TASKS];
 /* Serialization of changes to printk_offload_chars and kthread creation */
-static DEFINE_MUTEX(printk_kthread_mutex);
+static DEFINE_MUTEX(printing_kthread_mutex);
 
 /* Wait queue printing kthreads sleep on when idle */
 static DECLARE_WAIT_QUEUE_HEAD(print_queue);
@@ -2840,28 +2842,113 @@ static int printing_task(void *arg)
 	return 0;
 }
 
+/* Divide online cpus among printing kthreads */
+static void distribute_printing_kthreads(void)
+{
+	int i;
+	unsigned int cpus_per_thread;
+	unsigned int cpu, seen_cpu;
+
+	for (i = 0; i < PRINTING_TASKS; i++)
+		cpumask_clear(printing_kthread_mask[i]);
+
+	cpus_per_thread = DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS);
+	seen_cpu = 0;
+	for_each_online_cpu(cpu) {
+		cpumask_set_cpu(cpu,
+			printing_kthread_mask[seen_cpu / cpus_per_thread]);
+		seen_cpu++;
+	}
+
+	for (i = 0; i < PRINTING_TASKS; i++)
+		if (!cpumask_empty(printing_kthread_mask[i]))
+			set_cpus_allowed_ptr(printing_kthread[i],
+					     printing_kthread_mask[i]);
+}
+
+static int printing_kthread_cpu_notify(struct notifier_block *nfb,
+				       unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	int i;
+
+	if (printk_offload_chars == 0)
+		goto out;
+
+	/* Get exclusion against turning printk offload off... */
+	mutex_lock(&printing_kthread_mutex);
+	/* Now a reliable check if printk offload is enabled */
+	if (printk_offload_chars == 0) {
+		mutex_unlock(&printing_kthread_mutex);
+		goto out;
+	}
+
+	if (action == CPU_ONLINE) {
+		/*
+		 * Allow some task to use the CPU. We don't want to spend too
+		 * much time with fair distribution so just guess. We do a fair
+		 * redistribution if some task has no cpu to run on.
+		 */
+		i = cpu % PRINTING_TASKS;
+		cpumask_set_cpu(cpu, printing_kthread_mask[i]);
+		set_cpus_allowed_ptr(printing_kthread[i],
+				     printing_kthread_mask[i]);
+	}
+	if (action == CPU_DEAD) {
+
+		for (i = 0; i < PRINTING_TASKS; i++) {
+			if (cpumask_test_cpu(cpu, printing_kthread_mask[i])) {
+				cpumask_clear_cpu(cpu,
+						  printing_kthread_mask[i]);
+				if (cpumask_empty(printing_kthread_mask[i]))
+					distribute_printing_kthreads();
+				break;
+			}
+		}
+	}
+	mutex_unlock(&printing_kthread_mutex);
+out:
+	return NOTIFY_OK;
+}
+
 static int printk_start_offload_kthreads(void)
 {
 	int i;
 	struct task_struct *task;
+	int ret;
 
 	/* Does handover of printing make any sense? */
 	if (printk_offload_chars == 0 || num_possible_cpus() <= 1)
 		return 0;
+
 	for (i = 0; i < PRINTING_TASKS; i++) {
 		if (printing_kthread[i])
 			continue;
+		printing_kthread_mask[i] = kmalloc(cpumask_size(), GFP_KERNEL);
+		if (!printing_kthread_mask[i]) {
+			pr_err("printk: Cannot allocate cpumask for printing "
+			       "thread.\n");
+			ret = -ENOMEM;
+			goto out_err;
+		}
 		task = kthread_run(printing_task, NULL, "print/%d", i);
-		if (IS_ERR(task))
+		if (IS_ERR(task)) {
+			kfree(printing_kthread_mask[i]);
+			pr_err("printk: Cannot create printing thread: %ld\n",
+			       PTR_ERR(task));
+			ret = PTR_ERR(task);
 			goto out_err;
+		}
 		printing_kthread[i] = task;
 	}
+
+	hotcpu_notifier(printing_kthread_cpu_notify, 0);
+	distribute_printing_kthreads();
 	return 0;
 out_err:
-	pr_err("printk: Cannot create printing thread: %ld\n", PTR_ERR(task));
 	/* Disable offloading if creating kthreads failed */
 	printk_offload_chars = 0;
-	return PTR_ERR(task);
+	return ret;
 }
 
 static int offload_chars_set(const char *val, const struct kernel_param *kp)
@@ -2869,26 +2956,26 @@ static int offload_chars_set(const char *val, const struct kernel_param *kp)
 	int ret;
 
 	/* Protect against parallel change of printk_offload_chars */
-	mutex_lock(&printk_kthread_mutex);
+	mutex_lock(&printing_kthread_mutex);
 	ret = param_set_uint(val, kp);
 	if (ret) {
-		mutex_unlock(&printk_kthread_mutex);
+		mutex_unlock(&printing_kthread_mutex);
 		return ret;
 	}
 	ret = printk_start_offload_kthreads();
-	mutex_unlock(&printk_kthread_mutex);
+	mutex_unlock(&printing_kthread_mutex);
 	return ret;
 }
 
 static void printk_offload_init(void)
 {
-	mutex_lock(&printk_kthread_mutex);
+	mutex_lock(&printing_kthread_mutex);
 	if (num_possible_cpus() <= 1) {
 		/* Offloading doesn't make sense. Disable print offloading. */
 		printk_offload_chars = 0;
 	} else
 		printk_start_offload_kthreads();
-	mutex_unlock(&printk_kthread_mutex);
+	mutex_unlock(&printing_kthread_mutex);
 }
 
 #else	/* CONFIG_PRINTK_OFFLOAD */
-- 
2.1.4


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU
@ 2015-12-10 15:19 Sergey Senozhatsky
  0 siblings, 0 replies; 2+ messages in thread
From: Sergey Senozhatsky @ 2015-12-10 15:19 UTC (permalink / raw)
  To: Jan Kara
  Cc: Andrew Morton, Petr Mladek, KY Srinivasan, Steven Rostedt,
	linux-kernel, Sergey Senozhatsky, Sergey Senozhatsky

>+static void distribute_printing_kthreads(void)
>+{
>+	int i;
>+	unsigned int cpus_per_thread;
>+	unsigned int cpu, seen_cpu;
>+
>+	for (i = 0; i < PRINTING_TASKS; i++)
>+		cpumask_clear(printing_kthread_mask[i]);
>+
>+	cpus_per_thread = DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS);
>+	seen_cpu = 0;
>+	for_each_online_cpu(cpu) {
>+		cpumask_set_cpu(cpu,
>+			printing_kthread_mask[seen_cpu / cpus_per_thread]);
>+		seen_cpu++;
>+	}
>+
>+	for (i = 0; i < PRINTING_TASKS; i++)
>+		if (!cpumask_empty(printing_kthread_mask[i]))
>+			set_cpus_allowed_ptr(printing_kthread[i],
>+					     printing_kthread_mask[i]);
>+}


I certainly understand what you are trying to do here, but I'm a bit concerned.
This may result in 'bad' affinities on big.LITTLE platforms, for example. So I
think that printk is not quite good place for that type of decisions. Just my 5
cents.

	-ss

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2015-12-10 15:20 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-12-10 15:19 [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU Sergey Senozhatsky
  -- strict thread matches above, loose matches on Subject: below --
2015-10-26  4:52 [PATCH 0/6 v2] printk: Softlockup avoidance Jan Kara
2015-10-26  4:52 ` [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU Jan Kara

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).