From: Andrii Nakryiko <andrii@kernel.org>
To: linux-trace-kernel@vger.kernel.org, rostedt@goodmis.org,
	mhiramat@kernel.org, oleg@redhat.com
Cc: peterz@infradead.org, mingo@redhat.com, bpf@vger.kernel.org,
	jolsa@kernel.org, paulmck@kernel.org, clm@meta.com,
	Andrii Nakryiko <andrii@kernel.org>
Subject: [PATCH v2 12/12] uprobes: switch uprobes_treelock to per-CPU RW semaphore
Date: Mon,  1 Jul 2024 15:39:35 -0700	[thread overview]
Message-ID: <20240701223935.3783951-13-andrii@kernel.org> (raw)
In-Reply-To: <20240701223935.3783951-1-andrii@kernel.org>

With all the batch uprobe API work in place, we are now finally ready
to reap the benefits. Switch uprobes_treelock from a reader-writer
spinlock to a much more efficient and scalable per-CPU RW semaphore.
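
The win is on the reader fast path: a per-CPU RW semaphore keeps reader
state in per-CPU counters, so concurrent readers don't bounce a shared
cacheline the way rwlock_t readers do. The trade-off is a much heavier
write lock, which is acceptable here because uprobes_tree lookups vastly
outnumber modifications. A minimal illustrative sketch of the API (not
part of this patch; my_lock is a made-up name):

  /* #include <linux/percpu-rwsem.h> */
  DEFINE_STATIC_PERCPU_RWSEM(my_lock);

  /* reader fast path: touches only this CPU's state */
  percpu_down_read(&my_lock);
  /* ... read-only lookups ... */
  percpu_up_read(&my_lock);

  /* writer slow path: waits for all in-flight readers */
  percpu_down_write(&my_lock);
  /* ... modifications ... */
  percpu_up_write(&my_lock);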

Time for benchmarks and numbers. I used BPF selftests' bench tool,
specifically the trig-uprobe-nop benchmark, to see how total uprobe
throughput scales with the number of competing threads (each mapped to
an individual CPU). Here are the results:

  # threads   BEFORE (mln/s)    AFTER (mln/s)
  ---------   --------------    -------------
  1           3.131             3.140
  2           3.394             3.601
  3           3.630             3.960
  4           3.317             3.551
  5           3.448             3.464
  6           3.345             3.283
  7           3.469             3.444
  8           3.182             3.258
  9           3.138             3.139
  10          2.999             3.212
  11          2.903             3.183
  12          2.802             3.027
  13          2.792             3.027
  14          2.695             3.086
  15          2.822             2.965
  16          2.679             2.939
  17          2.622             2.888
  18          2.628             2.914
  19          2.702             2.836
  20          2.561             2.837

One can see that the per-CPU RW semaphore-based implementation scales
better with the number of CPUs, while single-CPU throughput stays
basically the same.
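
For reference, numbers like the above can be gathered along these lines
(a rough sketch from memory; exact bench flags may differ between trees:
-a pins threads to CPUs, -p sets the number of producer threads):

  # run from tools/testing/selftests/bpf, before and after this patch
  for p in $(seq 1 20); do
          ./bench -a -p$p trig-uprobe-nop
  done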

Note that scalability is still limited by register_rwsem; this will
hopefully be addressed in follow-up patch set(s).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 kernel/events/uprobes.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index bb480a2400e1..1d76551e5e23 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -39,7 +39,7 @@ static struct rb_root uprobes_tree = RB_ROOT;
  */
 #define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
 
-static DEFINE_RWLOCK(uprobes_treelock);	/* serialize rbtree access */
+DEFINE_STATIC_PERCPU_RWSEM(uprobes_treelock);	/* serialize rbtree access */
 
 #define UPROBES_HASH_SZ	13
 /* serialize uprobe->pending_list */
@@ -684,7 +684,7 @@ static void __put_uprobe(struct uprobe *uprobe, bool tree_locked)
 		bool destroy;
 
 		if (!tree_locked)
-			write_lock(&uprobes_treelock);
+			percpu_down_write(&uprobes_treelock);
 		/*
 		 * We might race with find_uprobe()->__get_uprobe() executed
 		 * from inside read-locked uprobes_treelock, which can bump
@@ -708,7 +708,7 @@ static void __put_uprobe(struct uprobe *uprobe, bool tree_locked)
 		if (destroy && uprobe_is_active(uprobe))
 			rb_erase(&uprobe->rb_node, &uprobes_tree);
 		if (!tree_locked)
-			write_unlock(&uprobes_treelock);
+			percpu_up_write(&uprobes_treelock);
 
 		/*
 		 * Beyond here we don't need RCU protection, we are either the
@@ -816,9 +816,9 @@ static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
 {
 	struct uprobe *uprobe;
 
-	read_lock(&uprobes_treelock);
+	percpu_down_read(&uprobes_treelock);
 	uprobe = __find_uprobe(inode, offset);
-	read_unlock(&uprobes_treelock);
+	percpu_up_read(&uprobes_treelock);
 
 	return uprobe;
 }
@@ -1205,7 +1205,7 @@ void uprobe_unregister_batch(struct inode *inode, int cnt, uprobe_consumer_fn ge
 		up_write(&uprobe->register_rwsem);
 	}
 
-	write_lock(&uprobes_treelock);
+	percpu_down_write(&uprobes_treelock);
 	for (i = 0; i < cnt; i++) {
 		uc = get_uprobe_consumer(i, ctx);
 		uprobe = uc->uprobe;
@@ -1216,7 +1216,7 @@ void uprobe_unregister_batch(struct inode *inode, int cnt, uprobe_consumer_fn ge
 		__put_uprobe(uprobe, true);
 		uc->uprobe = NULL;
 	}
-	write_unlock(&uprobes_treelock);
+	percpu_up_write(&uprobes_treelock);
 }
 
 static struct uprobe_consumer *uprobe_consumer_identity(size_t idx, void *ctx)
@@ -1321,7 +1321,7 @@ int uprobe_register_batch(struct inode *inode, int cnt,
 	}
 
 	ret = 0;
-	write_lock(&uprobes_treelock);
+	percpu_down_write(&uprobes_treelock);
 	for (i = 0; i < cnt; i++) {
 		struct uprobe *cur_uprobe;
 
@@ -1344,7 +1344,7 @@ int uprobe_register_batch(struct inode *inode, int cnt,
 		}
 	}
 unlock_treelock:
-	write_unlock(&uprobes_treelock);
+	percpu_up_write(&uprobes_treelock);
 	if (ret)
 		goto cleanup_uprobes;
 
@@ -1376,7 +1376,7 @@ int uprobe_register_batch(struct inode *inode, int cnt,
 	}
 cleanup_uprobes:
 	/* put all the successfully allocated/reused uprobes */
-	write_lock(&uprobes_treelock);
+	percpu_down_write(&uprobes_treelock);
 	for (i = 0; i < cnt; i++) {
 		uc = get_uprobe_consumer(i, ctx);
 
@@ -1384,7 +1384,7 @@ int uprobe_register_batch(struct inode *inode, int cnt,
 			__put_uprobe(uc->uprobe, true);
 		uc->uprobe = NULL;
 	}
-	write_unlock(&uprobes_treelock);
+	percpu_up_write(&uprobes_treelock);
 	return ret;
 }
 
@@ -1492,7 +1492,7 @@ static void build_probe_list(struct inode *inode,
 	min = vaddr_to_offset(vma, start);
 	max = min + (end - start) - 1;
 
-	read_lock(&uprobes_treelock);
+	percpu_down_read(&uprobes_treelock);
 	n = find_node_in_range(inode, min, max);
 	if (n) {
 		for (t = n; t; t = rb_prev(t)) {
@@ -1510,7 +1510,7 @@ static void build_probe_list(struct inode *inode,
 			list_add(&u->pending_list, head);
 		}
 	}
-	read_unlock(&uprobes_treelock);
+	percpu_up_read(&uprobes_treelock);
 }
 
 /* @vma contains reference counter, not the probed instruction. */
@@ -1601,9 +1601,9 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
 	min = vaddr_to_offset(vma, start);
 	max = min + (end - start) - 1;
 
-	read_lock(&uprobes_treelock);
+	percpu_down_read(&uprobes_treelock);
 	n = find_node_in_range(inode, min, max);
-	read_unlock(&uprobes_treelock);
+	percpu_up_read(&uprobes_treelock);
 
 	return !!n;
 }
-- 
2.43.0


Thread overview: 67+ messages
2024-07-01 22:39 [PATCH v2 00/12] uprobes: add batched register/unregister APIs and per-CPU RW semaphore Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 01/12] uprobes: update outdated comment Andrii Nakryiko
2024-07-03 11:38   ` Oleg Nesterov
2024-07-03 18:24     ` Andrii Nakryiko
2024-07-03 21:51     ` Andrii Nakryiko
2024-07-10 13:31     ` Oleg Nesterov
2024-07-10 15:14       ` Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 02/12] uprobes: correct mmap_sem locking assumptions in uprobe_write_opcode() Andrii Nakryiko
2024-07-03 11:41   ` Oleg Nesterov
2024-07-03 13:15   ` Masami Hiramatsu
2024-07-03 18:25     ` Andrii Nakryiko
2024-07-03 21:47       ` Masami Hiramatsu
2024-07-01 22:39 ` [PATCH v2 03/12] uprobes: simplify error handling for alloc_uprobe() Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 04/12] uprobes: revamp uprobe refcounting and lifetime management Andrii Nakryiko
2024-07-02 10:22   ` Peter Zijlstra
2024-07-02 17:54     ` Andrii Nakryiko
2024-07-03 13:36   ` Peter Zijlstra
2024-07-03 20:47     ` Andrii Nakryiko
2024-07-04  8:03       ` Peter Zijlstra
2024-07-04  8:45         ` Peter Zijlstra
2024-07-04 14:40           ` Masami Hiramatsu
2024-07-04  8:31       ` Peter Zijlstra
2024-07-05 15:37   ` Oleg Nesterov
2024-07-06 17:00     ` Jiri Olsa
2024-07-06 17:05       ` Jiri Olsa
2024-07-07 14:46     ` Oleg Nesterov
2024-07-08 17:47       ` Andrii Nakryiko
2024-07-09 18:47         ` Oleg Nesterov
2024-07-09 20:59           ` Andrii Nakryiko
2024-07-09 21:31             ` Oleg Nesterov
2024-07-09 21:45               ` Andrii Nakryiko
2024-07-08 17:47     ` Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 05/12] uprobes: move offset and ref_ctr_offset into uprobe_consumer Andrii Nakryiko
2024-07-03  8:13   ` Peter Zijlstra
2024-07-03 10:13     ` Masami Hiramatsu
2024-07-03 18:23       ` Andrii Nakryiko
2024-07-07 12:48   ` Oleg Nesterov
2024-07-08 17:56     ` Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 06/12] uprobes: add batch uprobe register/unregister APIs Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 07/12] uprobes: inline alloc_uprobe() logic into __uprobe_register() Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 08/12] uprobes: split uprobe allocation and uprobes_tree insertion steps Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 09/12] uprobes: batch uprobes_treelock during registration Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 10/12] uprobes: improve lock batching for uprobe_unregister_batch Andrii Nakryiko
2024-07-01 22:39 ` [PATCH v2 11/12] uprobes,bpf: switch to batch uprobe APIs for BPF multi-uprobes Andrii Nakryiko
2024-07-01 22:39 ` Andrii Nakryiko [this message]
2024-07-02 10:23 ` [PATCH v2 00/12] uprobes: add batched register/unregister APIs and per-CPU RW semaphore Peter Zijlstra
2024-07-02 11:54   ` Peter Zijlstra
2024-07-02 12:01     ` Peter Zijlstra
2024-07-02 17:54     ` Andrii Nakryiko
2024-07-02 19:18       ` Peter Zijlstra
2024-07-02 23:56         ` Paul E. McKenney
2024-07-03  4:54           ` Andrii Nakryiko
2024-07-03  7:50           ` Peter Zijlstra
2024-07-03 14:08             ` Paul E. McKenney
2024-07-04  8:39               ` Peter Zijlstra
2024-07-04 15:13                 ` Paul E. McKenney
2024-07-03 21:57             ` Steven Rostedt
2024-07-03 22:07               ` Paul E. McKenney
2024-07-03  4:47         ` Andrii Nakryiko
2024-07-03  8:07           ` Peter Zijlstra
2024-07-03 20:55             ` Andrii Nakryiko
2024-07-03 21:33 ` Andrii Nakryiko
2024-07-04  9:15   ` Peter Zijlstra
2024-07-04 13:56     ` Steven Rostedt
2024-07-04 15:44     ` Paul E. McKenney
2024-07-08 17:47       ` Andrii Nakryiko
2024-07-08 17:48     ` Andrii Nakryiko
