From: Peter Zijlstra <peterz@infradead.org>
To: mingo@kernel.org, andrii@kernel.org, oleg@redhat.com
Cc: linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
peterz@infradead.org, rostedt@goodmis.org, mhiramat@kernel.org,
jolsa@kernel.org, clm@meta.com, paulmck@kernel.org
Subject: [PATCH v2 04/11] perf/uprobe: RCU-ify find_uprobe()
Date: Thu, 11 Jul 2024 13:02:39 +0200
Message-ID: <20240711110400.635302571@infradead.org>
In-Reply-To: <20240711110235.098009979@infradead.org>
With handle_swbp() triggering concurrently on (all) CPUs, uprobes_treelock
becomes a bottleneck. Avoid taking the lock on the lookup path by finding
the uprobe under RCU instead.

Since a lockless RB-tree lookup can race with concurrent tree modifications
and return a false negative, pair it with a seqcount: a hit is always valid,
a miss is retried if a writer was active. And since a found uprobe may be
concurrently released, take the reference with refcount_inc_not_zero() and
defer the final kfree() by an RCU grace period.
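
Condensed, the lookup side of that pattern looks like the sketch below. The
names (obj, obj_tree, obj_tree_seq, obj_cmp_key) are placeholders for
illustration only, not the actual uprobe symbols touched by this patch;
rb_find_rcu() is the helper added earlier in this series:

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>

struct obj {
	struct rb_node	rb_node;
	unsigned long	key;
	refcount_t	ref;
	struct rcu_head	rcu;
};

static struct rb_root obj_tree = RB_ROOT;
static DEFINE_RWLOCK(obj_tree_lock);
static seqcount_rwlock_t obj_tree_seq =
	SEQCNT_RWLOCK_ZERO(obj_tree_seq, &obj_tree_lock);

static int obj_cmp_key(const void *key, const struct rb_node *node)
{
	unsigned long k = *(const unsigned long *)key;
	const struct obj *o = rb_entry(node, struct obj, rb_node);

	return k < o->key ? -1 : k > o->key;
}

/* Lockless lookup: no obj_tree_lock, only an RCU read-side section. */
static struct obj *obj_find(unsigned long key)
{
	unsigned int seq;

	guard(rcu)();	/* nodes stay valid; freeing is deferred by call_rcu() */

	do {
		seq = read_seqcount_begin(&obj_tree_seq);
		struct rb_node *node = rb_find_rcu(&key, &obj_tree, obj_cmp_key);
		if (node) {
			struct obj *o = rb_entry(node, struct obj, rb_node);
			/* The object may already be dying; conditional get only. */
			if (refcount_inc_not_zero(&o->ref))
				return o;
		}
		/* A miss (or a dying hit) may race a writer; retry if one ran. */
	} while (read_seqcount_retry(&obj_tree_seq, seq));

	return NULL;	/* validated miss */
}
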
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/events/uprobes.c | 49 +++++++++++++++++++++++++++++++++++++++---------
1 file changed, 40 insertions(+), 9 deletions(-)
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -40,6 +40,7 @@ static struct rb_root uprobes_tree = RB_
#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
static DEFINE_RWLOCK(uprobes_treelock); /* serialize rbtree access */
+static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);
#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
@@ -54,6 +55,7 @@ DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem)
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
refcount_t ref;
+ struct rcu_head rcu;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
@@ -587,12 +589,25 @@ set_orig_insn(struct arch_uprobe *auprob
*(uprobe_opcode_t *)&auprobe->insn);
}
+static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
+{
+ if (refcount_inc_not_zero(&uprobe->ref))
+ return uprobe;
+ return NULL;
+}
+
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
refcount_inc(&uprobe->ref);
return uprobe;
}
+static void uprobe_free_rcu(struct rcu_head *rcu)
+{
+ struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
+ kfree(uprobe);
+}
+
static void put_uprobe(struct uprobe *uprobe)
{
if (refcount_dec_and_test(&uprobe->ref)) {
@@ -604,7 +619,7 @@ static void put_uprobe(struct uprobe *up
mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(uprobe, NULL);
mutex_unlock(&delayed_uprobe_lock);
- kfree(uprobe);
+ call_rcu(&uprobe->rcu, uprobe_free_rcu);
}
}
@@ -653,10 +668,10 @@ static struct uprobe *__find_uprobe(stru
.inode = inode,
.offset = offset,
};
- struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);
+ struct rb_node *node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
if (node)
- return get_uprobe(__node_2_uprobe(node));
+ return try_get_uprobe(__node_2_uprobe(node));
return NULL;
}
@@ -667,20 +682,32 @@ static struct uprobe *__find_uprobe(stru
*/
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
- struct uprobe *uprobe;
+ unsigned int seq;
- read_lock(&uprobes_treelock);
- uprobe = __find_uprobe(inode, offset);
- read_unlock(&uprobes_treelock);
+ guard(rcu)();
- return uprobe;
+ do {
+ seq = read_seqcount_begin(&uprobes_seqcount);
+ struct uprobe *uprobe = __find_uprobe(inode, offset);
+ if (uprobe) {
+ /*
+ * Lockless RB-tree lookups are prone to false-negatives.
+ * If they find something, it's good. If they do not find,
+ * it needs to be validated.
+ */
+ return uprobe;
+ }
+ } while (read_seqcount_retry(&uprobes_seqcount, seq));
+
+ /* Really didn't find anything. */
+ return NULL;
}
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
struct rb_node *node;
- node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
+ node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
if (node)
return get_uprobe(__node_2_uprobe(node));
@@ -702,7 +729,9 @@ static struct uprobe *insert_uprobe(stru
struct uprobe *u;
write_lock(&uprobes_treelock);
+ write_seqcount_begin(&uprobes_seqcount);
u = __insert_uprobe(uprobe);
+ write_seqcount_end(&uprobes_seqcount);
write_unlock(&uprobes_treelock);
return u;
@@ -936,7 +965,9 @@ static void delete_uprobe(struct uprobe
return;
write_lock(&uprobes_treelock);
+ write_seqcount_begin(&uprobes_seqcount);
rb_erase(&uprobe->rb_node, &uprobes_tree);
+ write_seqcount_end(&uprobes_seqcount);
write_unlock(&uprobes_treelock);
RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
put_uprobe(uprobe);
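
Continuing the placeholder-named sketch from the changelog (a sketch of the
pattern, not the uprobe code itself): on the update and teardown side, tree
modifications are bracketed by the seqcount while still holding the existing
writer lock, and the final kfree() is pushed past a grace period so a
concurrent lockless lookup can never run refcount_inc_not_zero() on freed
memory. Insertion follows the same bracket, using rb_find_add_rcu() so the
new node is published with the memory ordering lockless readers need.

/* Writer side: modifications still serialize on obj_tree_lock. */
static void obj_erase(struct obj *o)
{
	write_lock(&obj_tree_lock);
	write_seqcount_begin(&obj_tree_seq);	/* makes racing misses retry */
	rb_erase(&o->rb_node, &obj_tree);
	write_seqcount_end(&obj_tree_seq);
	write_unlock(&obj_tree_lock);
}

static void obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct obj, rcu));	/* needs <linux/slab.h> */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref)) {
		/*
		 * Lockless lookups may still hold a pointer to this object;
		 * only free it once they are guaranteed to have finished.
		 */
		call_rcu(&o->rcu, obj_free_rcu);
	}
}
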
Thread overview: 45+ messages
2024-07-11 11:02 [PATCH v2 00/11] perf/uprobe: Optimize uprobes Peter Zijlstra
2024-07-11 11:02 ` [PATCH v2 01/11] perf/uprobe: Re-indent labels Peter Zijlstra
2024-07-11 11:58 ` Jiri Olsa
2024-07-11 12:07 ` Peter Zijlstra
2024-07-11 11:02 ` [PATCH v2 02/11] perf/uprobe: Remove spurious whitespace Peter Zijlstra
2024-07-11 11:02 ` [PATCH v2 03/11] rbtree: Provide rb_find_rcu() / rb_find_add_rcu() Peter Zijlstra
2024-07-12 20:23 ` Andrii Nakryiko
2024-07-15 11:21 ` Peter Zijlstra
2024-07-15 17:13 ` Andrii Nakryiko
2024-07-11 11:02 ` Peter Zijlstra [this message]
2024-07-11 13:59 ` [PATCH v2 04/11] perf/uprobe: RCU-ify find_uprobe() Masami Hiramatsu
2024-07-11 11:02 ` [PATCH v2 05/11] perf/uprobe: Simplify UPROBE_HANDLER_REMOVE logic Peter Zijlstra
2024-07-11 11:02 ` [PATCH v2 06/11] perf/uprobe: SRCU-ify uprobe->consumer list Peter Zijlstra
2024-07-12 21:06 ` Andrii Nakryiko
2024-07-15 11:25 ` Peter Zijlstra
2024-07-15 17:30 ` Andrii Nakryiko
2024-07-11 11:02 ` [PATCH v2 07/11] perf/uprobe: Split uprobe_unregister() Peter Zijlstra
2024-07-12 21:10 ` Andrii Nakryiko
2024-07-11 11:02 ` [PATCH v2 08/11] perf/uprobe: Convert (some) uprobe->refcount to SRCU Peter Zijlstra
2024-07-11 14:03 ` Jiri Olsa
2024-07-12 21:21 ` Andrii Nakryiko
2024-07-11 11:02 ` [PATCH v2 09/11] srcu: Add __srcu_clone_read_lock() Peter Zijlstra
2024-07-11 11:02 ` [PATCH v2 10/11] perf/uprobe: Convert single-step and uretprobe to SRCU Peter Zijlstra
2024-07-11 16:06 ` Oleg Nesterov
2024-07-11 18:42 ` Peter Zijlstra
2024-07-12 10:26 ` Oleg Nesterov
2024-07-12 21:28 ` Andrii Nakryiko
2024-07-15 11:59 ` Peter Zijlstra
2024-07-11 11:02 ` [PATCH v2 11/11] perf/uprobe: Add uretprobe timer Peter Zijlstra
2024-07-11 13:19 ` Oleg Nesterov
2024-07-11 15:00 ` Peter Zijlstra
2024-07-11 15:55 ` Peter Zijlstra
2024-07-11 16:06 ` Peter Zijlstra
2024-07-12 21:43 ` Andrii Nakryiko
2024-07-15 11:41 ` Peter Zijlstra
2024-07-15 17:34 ` Andrii Nakryiko
2024-07-12 4:57 ` [PATCH v2 00/11] perf/uprobe: Optimize uprobes Andrii Nakryiko
2024-07-12 9:13 ` Peter Zijlstra
2024-07-12 13:10 ` Peter Zijlstra
2024-07-12 15:29 ` Andrii Nakryiko
2024-07-15 14:45 ` Peter Zijlstra
2024-07-15 17:10 ` Andrii Nakryiko
2024-07-15 18:10 ` Andrii Nakryiko
2024-07-19 18:42 ` Andrii Nakryiko
2024-07-27 0:18 ` Andrii Nakryiko