* [PATCH AUTOSEL 5.19 05/29] powerpc/hw_breakpoint: Avoid relying on caller synchronization
       [not found] <20221018000839.2730954-1-sashal@kernel.org>
@ 2022-10-18  0:08 ` Sasha Levin
  2022-10-18  1:04   ` Marco Elver
  2022-10-18  0:08 ` [PATCH AUTOSEL 5.19 12/29] powerpc/64: don't refer nr_cpu_ids in asm code when it's undefined Sasha Levin
  1 sibling, 1 reply; 3+ messages in thread
From: Sasha Levin @ 2022-10-18  0:08 UTC (permalink / raw)
  To: linux-kernel, stable
  Cc: Sasha Levin, Ian Rogers, Marco Elver, Peter Zijlstra,
	linuxppc-dev, Dmitry Vyukov

From: Marco Elver <elver@google.com>

[ Upstream commit f95e5a3d59011eec1257d0e76de1e1f8969d426f ]

Internal data structures (cpu_bps, task_bps) of powerpc's hw_breakpoint
implementation have relied on nr_bp_mutex serializing access to them.

Before overhauling synchronization of kernel/events/hw_breakpoint.c,
introduce 2 spinlocks to synchronize cpu_bps and task_bps respectively,
thus avoiding reliance on callers synchronizing powerpc's hw_breakpoint.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20220829124719.675715-10-elver@google.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/kernel/hw_breakpoint.c | 53 ++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 2669f80b3a49..8db1a15d7acb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
 
@@ -129,7 +130,14 @@ struct breakpoint {
 	bool ptrace_bp;
 };
 
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
 static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
 static LIST_HEAD(task_bps);
 
 static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
@@ -174,7 +182,9 @@ static int task_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&task_bps_lock);
 	list_add(&tmp->list, &task_bps);
+	spin_unlock(&task_bps_lock);
 	return 0;
 }
 
@@ -182,6 +192,7 @@ static void task_bps_remove(struct perf_event *bp)
 {
 	struct list_head *pos, *q;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_safe(pos, q, &task_bps) {
 		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
 
@@ -191,6 +202,7 @@ static void task_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&task_bps_lock);
 }
 
 /*
@@ -200,12 +212,17 @@ static void task_bps_remove(struct perf_event *bp)
 static bool all_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
-		if (!can_co_exist(tmp, bp))
-			return true;
+		if (!can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 /*
@@ -215,13 +232,18 @@ static bool all_task_bps_check(struct perf_event *bp)
 static bool same_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
 		if (tmp->bp->hw.target == bp->hw.target &&
-		    !can_co_exist(tmp, bp))
-			return true;
+		    !can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 static int cpu_bps_add(struct perf_event *bp)
@@ -234,6 +256,7 @@ static int cpu_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i]) {
@@ -241,6 +264,7 @@ static int cpu_bps_add(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 	return 0;
 }
 
@@ -249,6 +273,7 @@ static void cpu_bps_remove(struct perf_event *bp)
 	struct breakpoint **cpu_bp;
 	int i = 0;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i])
@@ -260,19 +285,25 @@ static void cpu_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 }
 
 static bool cpu_bps_check(int cpu, struct perf_event *bp)
 {
 	struct breakpoint **cpu_bp;
+	bool ret = false;
 	int i;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
-			return true;
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&cpu_bps_lock);
+	return ret;
 }
 
 static bool all_cpu_bps_check(struct perf_event *bp)
@@ -286,10 +317,6 @@ static bool all_cpu_bps_check(struct perf_event *bp)
 	return false;
 }
 
-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
 int arch_reserve_bp_slot(struct perf_event *bp)
 {
 	int ret;
-- 
2.35.1

^ permalink raw reply related	[flat|nested] 3+ messages in thread
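The conversion above follows a common kernel locking pattern: a data
structure that used to piggyback on a caller-held lock (here
nr_bp_mutex) gets a lock of its own, and every early "return" inside a
now-locked loop becomes "ret = ...; break;" so the lock is released on
every exit path. A minimal sketch of that shape, using simplified
stand-in names rather than the file's real breakpoint bookkeeping:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Simplified stand-in for the real per-task breakpoint bookkeeping. */
struct bp_entry {
	struct list_head list;
	int slot;
};

static DEFINE_SPINLOCK(entries_lock);	/* protects 'entries' */
static LIST_HEAD(entries);

/*
 * Writers take the list's own lock instead of assuming the caller
 * serializes access for them.
 */
static void entry_add(struct bp_entry *e)
{
	spin_lock(&entries_lock);
	list_add(&e->list, &entries);
	spin_unlock(&entries_lock);
}

/*
 * Readers can no longer 'return true' from inside the loop while the
 * lock is held: record the result, break, unlock, then return.
 */
static bool entry_conflicts(int slot)
{
	struct bp_entry *tmp;
	bool ret = false;

	spin_lock(&entries_lock);
	list_for_each_entry(tmp, &entries, list) {
		if (tmp->slot == slot) {
			ret = true;
			break;
		}
	}
	spin_unlock(&entries_lock);
	return ret;
}

This is exactly why the patch rewrites each "return true" in the check
functions as "ret = true; break;": once the structure owns a spinlock,
no exit path may leave it held.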
* Re: [PATCH AUTOSEL 5.19 05/29] powerpc/hw_breakpoint: Avoid relying on caller synchronization
  2022-10-18  0:08 ` [PATCH AUTOSEL 5.19 05/29] powerpc/hw_breakpoint: Avoid relying on caller synchronization Sasha Levin
@ 2022-10-18  1:04   ` Marco Elver
  0 siblings, 0 replies; 3+ messages in thread
From: Marco Elver @ 2022-10-18  1:04 UTC (permalink / raw)
  To: Sasha Levin
  Cc: Ian Rogers, Peter Zijlstra, linux-kernel, stable, linuxppc-dev,
	Dmitry Vyukov

On Mon, 17 Oct 2022 at 17:08, Sasha Levin <sashal@kernel.org> wrote:
>
> From: Marco Elver <elver@google.com>
>
> [ Upstream commit f95e5a3d59011eec1257d0e76de1e1f8969d426f ]
>
> Internal data structures (cpu_bps, task_bps) of powerpc's hw_breakpoint
> implementation have relied on nr_bp_mutex serializing access to them.
>
> Before overhauling synchronization of kernel/events/hw_breakpoint.c,
> introduce 2 spinlocks to synchronize cpu_bps and task_bps respectively,
> thus avoiding reliance on callers synchronizing powerpc's hw_breakpoint.
>
> Reported-by: Dmitry Vyukov <dvyukov@google.com>
> Signed-off-by: Marco Elver <elver@google.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Acked-by: Dmitry Vyukov <dvyukov@google.com>
> Acked-by: Ian Rogers <irogers@google.com>
> Link: https://lore.kernel.org/r/20220829124719.675715-10-elver@google.com
> Signed-off-by: Sasha Levin <sashal@kernel.org>

Backporting this patch seems unnecessary if we're not backporting the
hw_breakpoint overhaul. Without the overhaul, nothing will break
without this patch.

Thanks,
-- Marco

> [ remainder of quoted patch trimmed; see the full diff in the message above ]

^ permalink raw reply	[flat|nested] 3+ messages in thread
* [PATCH AUTOSEL 5.19 12/29] powerpc/64: don't refer nr_cpu_ids in asm code when it's undefined
       [not found] <20221018000839.2730954-1-sashal@kernel.org>
  2022-10-18  0:08 ` [PATCH AUTOSEL 5.19 05/29] powerpc/hw_breakpoint: Avoid relying on caller synchronization Sasha Levin
@ 2022-10-18  0:08 ` Sasha Levin
  1 sibling, 0 replies; 3+ messages in thread
From: Sasha Levin @ 2022-10-18  0:08 UTC (permalink / raw)
  To: linux-kernel, stable
  Cc: Sasha Levin, Stephen Rothwell, Yury Norov, aik, amodra,
	linuxppc-dev, dja

From: Yury Norov <yury.norov@gmail.com>

[ Upstream commit 546a073d628111e3338af689938407e77d5dc38f ]

generic_secondary_common_init() calls LOAD_REG_ADDR(r7, nr_cpu_ids)
conditionally on CONFIG_SMP. However, if 'NR_CPUS == 1', the kernel
doesn't use nr_cpu_ids, and in C code it's just:

  #if NR_CPUS == 1
  #define nr_cpu_ids ...

This series makes the declaration of nr_cpu_ids conditional on
NR_CPUS == 1, and that reveals the issue: the compiler can't link
LOAD_REG_ADDR(r7, nr_cpu_ids) against a nonexistent symbol.

The current code looks unsafe for anyone who builds the kernel with
CONFIG_SMP=y and NR_CPUS == 1. This is a weird configuration, but not
a disallowed one.

Fix the linker error by replacing LOAD_REG_ADDR() with
LOAD_REG_IMMEDIATE() conditionally on NR_CPUS == 1.

As the following patch adds the CONFIG_FORCE_NR_CPUS option, which has
a similar effect on nr_cpu_ids, make generic_secondary_common_init()
conditional on it too.

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/powerpc/kernel/head_64.S | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d3eea633d11a..8408d3f7f61f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -400,8 +400,12 @@ generic_secondary_common_init:
 #else
 	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointe	 */
 	ld	r8,0(r8)		/* Get base vaddr of array	 */
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+	LOAD_REG_IMMEDIATE(r7, NR_CPUS)
+#else
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address	 */
 	lwz	r7,0(r7)		/* also the max paca allocated	 */
+#endif
 	li	r5,0			/* logical cpu id		 */
 1:	sldi	r9,r5,3			/* get paca_ptrs[] index from cpu id */
-- 
2.35.1

^ permalink raw reply related	[flat|nested] 3+ messages in thread
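The linker failure described above is easier to see in C than in the
head_64.S assembler. A reduced, illustrative sketch (the real
definitions live in <linux/cpumask.h>; this is not the kernel's
verbatim header):

#if NR_CPUS == 1
/*
 * With a single possible CPU, nr_cpu_ids becomes a compile-time
 * constant: no variable is ever emitted, so there is no symbol for
 * the assembler's LOAD_REG_ADDR(r7, nr_cpu_ids) to relocate against.
 */
#define nr_cpu_ids 1U
#else
extern unsigned int nr_cpu_ids;	/* a real object with an address */
#endif

static inline unsigned int max_paca_index(void)
{
	/*
	 * C callers are fine either way: the preprocessor substitutes
	 * the constant before any symbol resolution happens. Assembly
	 * has no such escape hatch, hence the patch's switch to
	 * LOAD_REG_IMMEDIATE(r7, NR_CPUS) when the variable is absent.
	 */
	return nr_cpu_ids;
}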