From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mail.linuxfoundation.org ([140.211.169.12]:39738 "EHLO
        mail.linuxfoundation.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
        with ESMTP id S1752604AbeCVNps (ORCPT );
        Thu, 22 Mar 2018 09:45:48 -0400
Subject: Patch "ACPI/processor: Replace racy task affinity logic" has been
        added to the 4.9-stable tree
To: tglx@linutronix.de, alexander.levin@microsoft.com,
        benh@kernel.crashing.org, bigeasy@linutronix.de, davem@davemloft.net,
        fenghua.yu@intel.com, gregkh@linuxfoundation.org,
        herbert@gondor.apana.org.au, jiangshanlai@gmail.com, lenb@kernel.org,
        mpe@ellerman.id.au, peterz@infradead.org, rjw@rjwysocki.net,
        tj@kernel.org, tony.luck@intel.com, viresh.kumar@linaro.org
Cc: 
From: 
Date: Thu, 22 Mar 2018 14:45:42 +0100
Message-ID: <152172634212186@kroah.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=ANSI_X3.4-1968
Content-Transfer-Encoding: 8bit
Sender: stable-owner@vger.kernel.org
List-ID: 

This is a note to let you know that I've just added the patch titled

    ACPI/processor: Replace racy task affinity logic

to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     acpi-processor-replace-racy-task-affinity-logic.patch
and it can be found in the queue-4.9 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.


>From foo@baz Thu Mar 22 14:40:23 CET 2018
From: Thomas Gleixner
Date: Wed, 12 Apr 2017 22:07:34 +0200
Subject: ACPI/processor: Replace racy task affinity logic

From: Thomas Gleixner

[ Upstream commit 8153f9ac43897f9f4786b30badc134fcc1a4fb11 ]

acpi_processor_get_throttling() requires invoking the getter function on
the target CPU. This is achieved by temporarily setting the affinity of
the calling user space thread to the requested CPU and resetting it to
the original affinity afterwards.

That's racy against CPU hotplug and against concurrent affinity changes
for that thread, and can result in code executing on the wrong CPU and in
the new affinity setting being overwritten.

acpi_processor_get_throttling() is invoked in two ways:

1) From the CPU online callback, which already runs on the target CPU and
   is therefore protected against hotplug and unaffected by affinity
   settings.

2) From the ACPI driver probe function, which is not protected against
   hotplug during modprobe.

Switch it over to work_on_cpu() and protect the probe function against
CPU hotplug.
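For reference, here is a minimal sketch (not part of the patch) of the
work_on_cpu() pattern the change adopts instead of rewriting the caller's
affinity; example_get_state() and example_get_state_on_cpu() are made-up
names used only for illustration.

/*
 * Illustrative sketch only -- not from the patch. Queries per-CPU state
 * via work_on_cpu() instead of touching the caller's affinity mask.
 * example_get_state() and example_get_state_on_cpu() are hypothetical.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static long example_get_state(void *data)
{
        /* Runs on the target CPU in that CPU's kworker context. */
        return 0;       /* placeholder for a per-CPU query */
}

static int example_get_state_on_cpu(int cpu)
{
        int ret;

        /*
         * Hold the hotplug lock so @cpu cannot go offline between the
         * cpu_online() check and the work_on_cpu() call.
         */
        get_online_cpus();
        if (!cpu_online(cpu)) {
                ret = -ENODEV;
        } else {
                /*
                 * work_on_cpu() queues example_get_state() on @cpu's
                 * kworker and waits for the result; the caller's own
                 * affinity is never modified, so there is nothing to
                 * race with concurrent sched_setaffinity() calls.
                 */
                ret = work_on_cpu(cpu, example_get_state, NULL);
        }
        put_online_cpus();

        return ret;
}

This is the same shape acpi_processor_get_throttling() takes after the
patch, with the hotplug protection supplied by its call sites.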
Miller" Cc: Len Brown Link: http://lkml.kernel.org/r/20170412201042.785920903@linutronix.de Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/processor_driver.c | 7 +++- drivers/acpi/processor_throttling.c | 62 ++++++++++++++++++++---------------- 2 files changed, 42 insertions(+), 27 deletions(-) --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -262,11 +262,16 @@ err_power_exit: static int acpi_processor_start(struct device *dev) { struct acpi_device *device = ACPI_COMPANION(dev); + int ret; if (!device) return -ENODEV; - return __acpi_processor_start(device); + /* Protect against concurrent CPU hotplug operations */ + get_online_cpus(); + ret = __acpi_processor_start(device); + put_online_cpus(); + return ret; } static int acpi_processor_stop(struct device *dev) --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg { #define THROTTLING_POSTCHANGE (2) static int acpi_processor_get_throttling(struct acpi_processor *pr); -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force); +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct); static int acpi_processor_update_tsd_coord(void) { @@ -891,7 +891,8 @@ static int acpi_processor_get_throttling ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid throttling state, reset\n")); state = 0; - ret = acpi_processor_set_throttling(pr, state, true); + ret = __acpi_processor_set_throttling(pr, state, true, + true); if (ret) return ret; } @@ -901,36 +902,31 @@ static int acpi_processor_get_throttling return 0; } -static int acpi_processor_get_throttling(struct acpi_processor *pr) +static long __acpi_processor_get_throttling(void *data) { - cpumask_var_t saved_mask; - int ret; + struct acpi_processor *pr = data; + + return pr->throttling.acpi_processor_get_throttling(pr); +} +static int acpi_processor_get_throttling(struct acpi_processor *pr) +{ if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) - return -ENOMEM; - /* - * Migrate task to the cpu pointed by pr. + * This is either called from the CPU hotplug callback of + * processor_driver or via the ACPI probe function. In the latter + * case the CPU is not guaranteed to be online. Both call sites are + * protected against CPU hotplug. */ - cpumask_copy(saved_mask, ¤t->cpus_allowed); - /* FIXME: use work_on_cpu() */ - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { - /* Can't migrate to the target pr->id CPU. 
-                /* Can't migrate to the target pr->id CPU. Exit */
-                free_cpumask_var(saved_mask);
+        if (!cpu_online(pr->id))
                 return -ENODEV;
-        }
-        ret = pr->throttling.acpi_processor_get_throttling(pr);
-        /* restore the previous state */
-        set_cpus_allowed_ptr(current, saved_mask);
-        free_cpumask_var(saved_mask);
 
-        return ret;
+        return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
 }
 
 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn
                         arg->target_state, arg->force);
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr,
-                int state, bool force)
+static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+{
+        if (direct)
+                return fn(arg);
+        return work_on_cpu(cpu, fn, arg);
+}
+
+static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+                                           int state, bool force, bool direct)
 {
         int ret = 0;
         unsigned int i;
@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct
                 arg.pr = pr;
                 arg.target_state = state;
                 arg.force = force;
-                ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+                ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
+                                  direct);
         } else {
                 /*
                  * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct
                         arg.pr = match_pr;
                         arg.target_state = state;
                         arg.force = force;
-                        ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
-                                &arg);
+                        ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
+                                          &arg, direct);
                 }
         }
         /*
@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct
         return ret;
 }
 
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
+                                  bool force)
+{
+        return __acpi_processor_set_throttling(pr, state, force, false);
+}
+
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 {
         int result = 0;


Patches currently in stable-queue which might be from tglx@linutronix.de are

queue-4.9/acpi-processor-fix-error-handling-in-__acpi_processor_start.patch
queue-4.9/x86-reboot-turn-off-kvm-when-halting-a-cpu.patch
queue-4.9/acpi-processor-replace-racy-task-affinity-logic.patch
queue-4.9/time-change-posix-clocks-ops-interfaces-to-use-timespec64.patch
queue-4.9/genirq-use-irqd_get_trigger_type-to-compare-the-trigger-type-for-shared-irqs.patch
queue-4.9/cpufreq-sh-replace-racy-task-affinity-logic.patch
queue-4.9/irqchip-mips-gic-separate-ipi-reservation-usage-tracking.patch
queue-4.9/x86-kaslr-fix-kexec-kernel-boot-crash-when-kaslr-randomization-fails.patch
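As an aside on the direct flag used in the patch above, here is a minimal,
hypothetical sketch of how a call_on_cpu()-style helper behaves at its two
kinds of call sites; apart from call_on_cpu() and work_on_cpu(), all names
below are invented for illustration and do not exist in the tree.

/*
 * Illustrative sketch only -- mirrors the call_on_cpu() helper added by
 * the patch. example_setter(), example_from_target_cpu() and
 * example_from_probe() are hypothetical.
 */
#include <linux/types.h>
#include <linux/workqueue.h>

static long example_setter(void *data)
{
        return 0;       /* placeholder for work that must run on one CPU */
}

static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
{
        if (direct)
                return fn(arg);
        return work_on_cpu(cpu, fn, arg);
}

/*
 * Caller already executes on the target CPU (for instance because it was
 * itself invoked through work_on_cpu()), so bouncing through another work
 * item is unnecessary; direct=true calls the function in place.
 */
static int example_from_target_cpu(int cpu)
{
        return call_on_cpu(cpu, example_setter, NULL, true);
}

/*
 * Caller may run on any CPU (for instance a hotplug-protected probe
 * path), so direct=false routes the call through the target CPU's
 * kworker via work_on_cpu().
 */
static int example_from_probe(int cpu)
{
        return call_on_cpu(cpu, example_setter, NULL, false);
}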