linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] perf/core: fix RCU issues with cgroup monitoring mode
@ 2015-10-17  1:28 Stephane Eranian
  2015-10-17  9:56 ` Peter Zijlstra
  2015-10-17 10:00 ` Peter Zijlstra
  0 siblings, 2 replies; 7+ messages in thread
From: Stephane Eranian @ 2015-10-17  1:28 UTC (permalink / raw)
  To: linux-kernel; +Cc: peterz, mingo, ak, edumazet, acme


This patch eliminates all known RCU violations detected
by the RCU checker (PROVE_RCU). The impacted code paths
were all related to cgroup mode monitoring and involved
accessing a task's cgrp.

Patch is relative to tip.git at commit:
	f8e9941 Merge branch 'x86/urgent'

Signed-off-by: Stephane Eranian <eranian@google.com>
---
 kernel/events/core.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index ea02109..65c4ffa 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -435,12 +435,16 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 	if (!is_cgroup_event(event))
 		return;
 
+	rcu_read_lock();
+
 	cgrp = perf_cgroup_from_task(current);
 	/*
 	 * Do not update time when cgroup is not active
 	 */
 	if (cgrp == event->cgrp)
 		__update_cgrp_time(event->cgrp);
+
+	rcu_read_unlock();
 }
 
 static inline void
@@ -458,9 +462,11 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	if (!task || !ctx->nr_cgroups)
 		return;
 
+	rcu_read_lock();
 	cgrp = perf_cgroup_from_task(task);
 	info = this_cpu_ptr(cgrp->info);
 	info->timestamp = ctx->timestamp;
+	rcu_read_unlock();
 }
 
 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
@@ -489,7 +495,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 	 * we reschedule only in the presence of cgroup
 	 * constrained events.
 	 */
-	rcu_read_lock();
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -531,8 +536,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 		}
 	}
 
-	rcu_read_unlock();
-
 	local_irq_restore(flags);
 }
 
@@ -542,6 +545,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
 
+	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
 	 */
@@ -561,6 +565,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	 */
 	if (cgrp1 != cgrp2)
 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+	rcu_read_unlock();
 }
 
 static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,6 +575,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
 
+	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
 	 */
@@ -577,6 +584,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	/* prev can never be NULL */
 	cgrp2 = perf_cgroup_from_task(prev);
 
+
 	/*
 	 * only need to schedule in cgroup events if we are changing
 	 * cgroup during ctxsw. Cgroup events were not scheduled
@@ -584,6 +592,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	 */
 	if (cgrp1 != cgrp2)
 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+	rcu_read_unlock();
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -2094,6 +2104,7 @@ static int  __perf_install_in_context(void *info)
 		cpuctx->task_ctx = task_ctx;
 		task = task_ctx->task;
 	}
+	rcu_read_lock();
 
 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 
@@ -2112,6 +2123,8 @@ static int  __perf_install_in_context(void *info)
 	 */
 	perf_event_sched_in(cpuctx, task_ctx, task);
 
+	rcu_read_unlock();
+
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
@@ -2398,7 +2411,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		return;
 
 	update_context_time(ctx);
+	rcu_read_lock();
 	update_cgrp_time_from_cpuctx(cpuctx);
+	rcu_read_unlock();
 	if (!ctx->nr_active)
 		return;
 
@@ -9442,7 +9457,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 static int __perf_cgroup_move(void *info)
 {
 	struct task_struct *task = info;
+	rcu_read_lock();
 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+	rcu_read_unlock();
 	return 0;
 }
 
-- 
2.1.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2015-10-26 20:20 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-10-17  1:28 [PATCH] perf/core: fix RCU issues with cgroup monitoring mode Stephane Eranian
2015-10-17  9:56 ` Peter Zijlstra
2015-10-19  7:58   ` Stephane Eranian
2015-10-19  9:12     ` Peter Zijlstra
2015-10-26 19:27       ` Stephane Eranian
2015-10-26 20:20         ` Peter Zijlstra
2015-10-17 10:00 ` Peter Zijlstra

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).