From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>,
linux-kernel@vger.kernel.org,
Corey Ashford <cjashfor@linux.vnet.ibm.com>,
Thomas Gleixner <tglx@linutronix.de>
Subject: Re: [PATCH 2/2] perf_counter: optimize context switch between identical inherited contexts
Date: Mon, 25 May 2009 12:38:07 +0200 [thread overview]
Message-ID: <1243247887.26820.651.camel@twins> (raw)
In-Reply-To: <18969.58137.356360.144339@cargo.ozlabs.ibm.com>
On Mon, 2009-05-25 at 10:15 +1000, Paul Mackerras wrote:
> Peter Zijlstra writes:
>
> > Ingo just pointed out that there is nothing there to close the race with
> > attaching a counter.
> >
> > That is, you could end up attaching your counter to the wrong task.
>
> Good point. Doing the unclone in find_get_context would be a start,
> but the locking could get tricky (in fact we don't have any way to
> remove an inherited counter from a context, so we only have to worry
> about counters being attached). I'll work out a solution after I have
> digested your recent batch of patches.
I'm currently staring at something like the below, trying to find races
etc. ;-)
attach vs destroy vs flip
---
Index: linux-2.6/kernel/perf_counter.c
===================================================================
--- linux-2.6.orig/kernel/perf_counter.c
+++ linux-2.6/kernel/perf_counter.c
@@ -102,13 +102,29 @@ static void get_ctx(struct perf_counter_
atomic_inc(&ctx->refcount);
}
-static void put_ctx(struct perf_counter_context *ctx)
+static void free_ctx_rcu(struct rcu_head *head)
+{
+ struct perf_counter_context *ctx;
+
+ ctx = container_of(head, struct perf_counter_context, rcu_head);
+ kfree(ctx);
+}
+
+static bool __put_ctx(struct perf_counter_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
- kfree(ctx);
+ return true;
}
+
+ return false;
+}
+
+static void put_ctx(struct perf_counter_context *ctx)
+{
+ if (__put_ctx(ctx))
+ call_rcu(&ctx->rcu_head, free_ctx_rcu);
}
/*
@@ -934,8 +950,16 @@ void perf_counter_task_sched_out(struct
next_ctx = next->perf_counter_ctxp;
if (next_ctx && context_equiv(ctx, next_ctx)) {
+ ctx->task = NULL;
+ next_ctx->task = NULL;
+
+ smp_wmb();
+
task->perf_counter_ctxp = next_ctx;
next->perf_counter_ctxp = ctx;
+
+ smp_wmb();
+
ctx->task = next;
next_ctx->task = task;
return;
@@ -1284,19 +1308,31 @@ static struct perf_counter_context *find
return ERR_PTR(-EACCES);
}
+ rcu_read_lock();
+again:
ctx = task->perf_counter_ctxp;
+ /*
+ * matched against the xchg() in perf_counter_exit_task() setting
+ * ctx to NULL and the cmpxchg() below.
+ */
+ smp_read_barrier_depends();
if (!ctx) {
+ rcu_read_unlock();
+ /*
+ * cannot attach counters to a dying task.
+ */
+ if (task->flags & PF_EXITING) {
+ put_task_struct(task);
+ return ERR_PTR(-ESRCH);
+ }
+
ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
if (!ctx) {
put_task_struct(task);
return ERR_PTR(-ENOMEM);
}
__perf_counter_init_context(ctx, task);
- /*
- * Make sure other cpus see correct values for *ctx
- * once task->perf_counter_ctxp is visible to them.
- */
- smp_wmb();
+ rcu_read_lock();
tctx = cmpxchg(&task->perf_counter_ctxp, NULL, ctx);
if (tctx) {
/*
@@ -1308,6 +1344,16 @@ static struct perf_counter_context *find
}
}
+ if (!atomic_inc_not_zero(&ctx->refcount))
+ goto again;
+
+ if (rcu_dereference(ctx->task) != task) {
+ put_ctx(ctx);
+ goto again;
+ }
+
+ rcu_read_unlock();
+
return ctx;
}
@@ -1316,7 +1362,6 @@ static void free_counter_rcu(struct rcu_
struct perf_counter *counter;
counter = container_of(head, struct perf_counter, rcu_head);
- put_ctx(counter->ctx);
kfree(counter);
}
@@ -1337,6 +1382,7 @@ static void free_counter(struct perf_cou
if (counter->destroy)
counter->destroy(counter);
+ put_ctx(counter->ctx);
call_rcu(&counter->rcu_head, free_counter_rcu);
}
@@ -3231,6 +3277,7 @@ SYSCALL_DEFINE5(perf_counter_open,
out_fput:
fput_light(group_file, fput_needed);
+ put_ctx(ctx);
return ret;
@@ -3390,25 +3437,25 @@ __perf_counter_exit_task(struct task_str
*
* Note: we may be running in child context, but the PID is not hashed
* anymore so new counters will not be added.
- * (XXX not sure that is true when we get called from flush_old_exec.
- * -- paulus)
*/
void perf_counter_exit_task(struct task_struct *child)
{
struct perf_counter *child_counter, *tmp;
struct perf_counter_context *child_ctx;
unsigned long flags;
+ bool free_ctx;
WARN_ON_ONCE(child != current);
- child_ctx = child->perf_counter_ctxp;
+ child_ctx = xchg(&child->perf_counter_ctxp, NULL);
if (likely(!child_ctx))
return;
+ free_ctx = __put_ctx(child_ctx);
+
local_irq_save(flags);
__perf_counter_task_sched_out(child_ctx);
- child->perf_counter_ctxp = NULL;
local_irq_restore(flags);
mutex_lock(&child_ctx->mutex);
@@ -3428,7 +3475,8 @@ again:
mutex_unlock(&child_ctx->mutex);
- put_ctx(child_ctx);
+ if (free_ctx)
+ call_rcu(&child_ctx->rcu_head, free_ctx_rcu);
}
/*
next prev parent reply other threads:[~2009-05-25 10:38 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-05-22 4:17 [PATCH 1/2] perf_counter: dynamically allocate tasks' perf_counter_context struct [v2] Paul Mackerras
2009-05-22 4:27 ` [PATCH 2/2] perf_counter: optimize context switch between identical inherited contexts Paul Mackerras
2009-05-22 8:16 ` Peter Zijlstra
2009-05-22 9:56 ` Paul Mackerras
2009-05-22 10:08 ` Peter Zijlstra
2009-05-23 12:38 ` Ingo Molnar
2009-05-23 13:06 ` Peter Zijlstra
2009-05-24 23:55 ` Paul Mackerras
2009-05-22 8:32 ` Peter Zijlstra
2009-05-22 8:57 ` Ingo Molnar
2009-05-22 9:02 ` Peter Zijlstra
2009-05-22 10:14 ` Ingo Molnar
2009-05-22 9:29 ` Paul Mackerras
2009-05-22 9:22 ` Peter Zijlstra
2009-05-22 9:42 ` Peter Zijlstra
2009-05-22 10:07 ` Paul Mackerras
2009-05-22 10:05 ` Paul Mackerras
2009-05-22 10:11 ` Ingo Molnar
2009-05-22 10:27 ` [tip:perfcounters/core] perf_counter: Optimize " tip-bot for Paul Mackerras
2009-05-24 11:33 ` Ingo Molnar
2009-05-25 6:18 ` Paul Mackerras
2009-05-25 6:54 ` Ingo Molnar
2009-05-22 10:36 ` [tip:perfcounters/core] perf_counter: fix !PERF_COUNTERS build failure tip-bot for Ingo Molnar
2009-05-22 13:46 ` [PATCH 2/2] perf_counter: optimize context switch between identical inherited contexts Peter Zijlstra
2009-05-25 0:15 ` Paul Mackerras
2009-05-25 10:38 ` Peter Zijlstra [this message]
2009-05-25 10:50 ` Peter Zijlstra
2009-05-25 11:06 ` Paul Mackerras
2009-05-25 11:27 ` Peter Zijlstra
2009-05-22 8:06 ` [PATCH 1/2] perf_counter: dynamically allocate tasks' perf_counter_context struct [v2] Peter Zijlstra
2009-05-22 9:30 ` Paul Mackerras
2009-05-22 10:27 ` [tip:perfcounters/core] perf_counter: Dynamically allocate tasks' perf_counter_context struct tip-bot for Paul Mackerras
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1243247887.26820.651.camel@twins \
--to=a.p.zijlstra@chello.nl \
--cc=cjashfor@linux.vnet.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=paulus@samba.org \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox