BPF List
 help / color / mirror / Atom feed
* [PATCH bpf-next 0/2 v3] bpf: Add LINK_DETACH for perf links
@ 2026-05-01 16:08 Florian Lehner
  2026-05-01 16:09 ` [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link Florian Lehner
  2026-05-01 16:09 ` [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH " Florian Lehner
  0 siblings, 2 replies; 7+ messages in thread
From: Florian Lehner @ 2026-05-01 16:08 UTC (permalink / raw)
  To: bpf
  Cc: ast, daniel, john.fastabend, andrii, martin.lau, eddyz87, memxor,
	song, yonghong.song, jolsa, shuah, davem, kuba, hawk, sdf,
	sun.jian.kdev, Florian Lehner

73b11c2a introduced LINK_DETACH and implemented it for some link types,
like xdp, netns and others.

This patch implements LINK_DETACH for perf links, re-using existing link
release handling code.

---

Change log:

v3:
 1. Introduce bpf_perf_link_mutex to guard against concurrent access

v2: https://lore.kernel.org/bpf/20260304210212.235096-1-dev@der-flo.net/
 1. Drop LINK_DETACH support for iter
 2. Add test for LINK_DETACH for perf event links

v1: https://lore.kernel.org/bpf/aJOhPoTLdYnZmHYA@der-flo.net/

Florian Lehner (2):
  bpf: Add LINK_DETACH support for perf link
  selftests/bpf: Test LINK_DETACH for perf link

 kernel/bpf/syscall.c                          | 87 ++++++++++++++++---
 .../selftests/bpf/prog_tests/perf_link.c      | 79 ++++++++++++++---
 2 files changed, 140 insertions(+), 26 deletions(-)

-- 
2.53.0


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link
  2026-05-01 16:08 [PATCH bpf-next 0/2 v3] bpf: Add LINK_DETACH for perf links Florian Lehner
@ 2026-05-01 16:09 ` Florian Lehner
  2026-05-01 16:52   ` bot+bpf-ci
  2026-05-01 16:59   ` sashiko-bot
  2026-05-01 16:09 ` [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH " Florian Lehner
  1 sibling, 2 replies; 7+ messages in thread
From: Florian Lehner @ 2026-05-01 16:09 UTC (permalink / raw)
  To: bpf
  Cc: ast, daniel, john.fastabend, andrii, martin.lau, eddyz87, memxor,
	song, yonghong.song, jolsa, shuah, davem, kuba, hawk, sdf,
	sun.jian.kdev, Florian Lehner

Implement the .detach operation for bpf_perf_link, allowing BPF_LINK_DETACH
to release the perf event without destroying the link object. This mirrors
the existing behavior for xdp and cgroup links.

Introduce bpf_perf_link_mutex to guard perf_file against concurrent access
from BPF_OBJ_GET_INFO_BY_FD and /proc fdinfo: the detach path NULLs out
perf_file under the lock, while fill_link_info and show_fdinfo take a
get_file() reference under the same lock before dereferencing it.

Signed-off-by: Florian Lehner <dev@der-flo.net>
---
 kernel/bpf/syscall.c | 87 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 73 insertions(+), 14 deletions(-)

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3b1f0ba02f61..efd759970e10 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3880,13 +3880,35 @@ struct bpf_perf_link {
 	struct file *perf_file;
 };
 
+/* Serializes bpf_perf_link_release() against bpf_perf_link_fill_link_info()
+ * and bpf_perf_link_show_fdinfo() to prevent a use-after-free on perf_file
+ * when BPF_LINK_DETACH races with BPF_OBJ_GET_INFO_BY_FD or /proc fdinfo.
+ */
+static DEFINE_MUTEX(bpf_perf_link_mutex);
+
 static void bpf_perf_link_release(struct bpf_link *link)
 {
 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
-	struct perf_event *event = perf_link->perf_file->private_data;
+	struct perf_event *event;
+	struct file *perf_file;
+
+	mutex_lock(&bpf_perf_link_mutex);
+	perf_file = perf_link->perf_file;
+	perf_link->perf_file = NULL;
+	mutex_unlock(&bpf_perf_link_mutex);
 
+	if (!perf_file)
+		return;
+
+	event = perf_file->private_data;
 	perf_event_free_bpf_prog(event);
-	fput(perf_link->perf_file);
+	fput(perf_file);
+}
+
+static int bpf_perf_link_detach(struct bpf_link *link)
+{
+	bpf_perf_link_release(link);
+	return 0;
 }
 
 static void bpf_perf_link_dealloc(struct bpf_link *link)
@@ -4095,22 +4117,42 @@ static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
 {
 	struct bpf_perf_link *perf_link;
 	const struct perf_event *event;
+	struct file *perf_file;
+	int ret;
 
 	perf_link = container_of(link, struct bpf_perf_link, link);
-	event = perf_get_event(perf_link->perf_file);
-	if (IS_ERR(event))
+
+	mutex_lock(&bpf_perf_link_mutex);
+	perf_file = perf_link->perf_file;
+	if (perf_file)
+		get_file(perf_file);
+	mutex_unlock(&bpf_perf_link_mutex);
+
+	if (!perf_file)
+		return 0;
+
+	event = perf_get_event(perf_file);
+	if (IS_ERR(event)) {
+		fput(perf_file);
 		return PTR_ERR(event);
+	}
 
 	switch (event->prog->type) {
 	case BPF_PROG_TYPE_PERF_EVENT:
-		return bpf_perf_link_fill_perf_event(event, info);
+		ret = bpf_perf_link_fill_perf_event(event, info);
+		break;
 	case BPF_PROG_TYPE_TRACEPOINT:
-		return bpf_perf_link_fill_tracepoint(event, info);
+		ret = bpf_perf_link_fill_tracepoint(event, info);
+		break;
 	case BPF_PROG_TYPE_KPROBE:
-		return bpf_perf_link_fill_probe(event, info);
+		ret = bpf_perf_link_fill_probe(event, info);
+		break;
 	default:
-		return -EOPNOTSUPP;
+		ret = -EOPNOTSUPP;
 	}
+
+	fput(perf_file);
+	return ret;
 }
 
 static void bpf_perf_event_link_show_fdinfo(const struct perf_event *event,
@@ -4163,26 +4205,43 @@ static void bpf_perf_link_show_fdinfo(const struct bpf_link *link,
 {
 	struct bpf_perf_link *perf_link;
 	const struct perf_event *event;
+	struct file *perf_file;
 
 	perf_link = container_of(link, struct bpf_perf_link, link);
-	event = perf_get_event(perf_link->perf_file);
-	if (IS_ERR(event))
+
+	mutex_lock(&bpf_perf_link_mutex);
+	perf_file = perf_link->perf_file;
+	if (perf_file)
+		get_file(perf_file);
+	mutex_unlock(&bpf_perf_link_mutex);
+
+	if (!perf_file)
 		return;
 
+	event = perf_get_event(perf_file);
+	if (IS_ERR(event))
+		goto out;
+
 	switch (event->prog->type) {
 	case BPF_PROG_TYPE_PERF_EVENT:
-		return bpf_perf_event_link_show_fdinfo(event, seq);
+		bpf_perf_event_link_show_fdinfo(event, seq);
+		break;
 	case BPF_PROG_TYPE_TRACEPOINT:
-		return bpf_tracepoint_link_show_fdinfo(event, seq);
+		bpf_tracepoint_link_show_fdinfo(event, seq);
+		break;
 	case BPF_PROG_TYPE_KPROBE:
-		return bpf_probe_link_show_fdinfo(event, seq);
+		bpf_probe_link_show_fdinfo(event, seq);
+		break;
 	default:
-		return;
+		break;
 	}
+out:
+	fput(perf_file);
 }
 
 static const struct bpf_link_ops bpf_perf_link_lops = {
 	.release = bpf_perf_link_release,
+	.detach = bpf_perf_link_detach,
 	.dealloc = bpf_perf_link_dealloc,
 	.fill_link_info = bpf_perf_link_fill_link_info,
 	.show_fdinfo = bpf_perf_link_show_fdinfo,
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH for perf link
  2026-05-01 16:08 [PATCH bpf-next 0/2 v3] bpf: Add LINK_DETACH for perf links Florian Lehner
  2026-05-01 16:09 ` [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link Florian Lehner
@ 2026-05-01 16:09 ` Florian Lehner
  2026-05-01 17:11   ` sashiko-bot
  2026-05-03 13:14   ` Jiri Olsa
  1 sibling, 2 replies; 7+ messages in thread
From: Florian Lehner @ 2026-05-01 16:09 UTC (permalink / raw)
  To: bpf
  Cc: ast, daniel, john.fastabend, andrii, martin.lau, eddyz87, memxor,
	song, yonghong.song, jolsa, shuah, davem, kuba, hawk, sdf,
	sun.jian.kdev, Florian Lehner

Add test_perf_link_detach() to verify that the new LINK_DETACH support for
BPF perf links works correctly. The test creates a link to a BPF program
for a software perf event, confirms the program is executed, calls
bpf_link_detach() to exercise the BPF_LINK_DETACH syscall path, and then
verifies the program is no longer invoked after detach.

Signed-off-by: Florian Lehner <dev@der-flo.net>
---
 .../selftests/bpf/prog_tests/perf_link.c      | 79 ++++++++++++++++---
 1 file changed, 67 insertions(+), 12 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
index 9e3a0d217af8..b75112c1b67d 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
@@ -18,29 +18,84 @@ static void burn_cpu(void)
 		barrier();
 }
 
-void test_perf_link(void)
+static int perf_link_setup(struct test_perf_link **skel, int *pfd)
 {
-	struct test_perf_link *skel = NULL;
 	struct perf_event_attr attr;
-	int pfd = -1, link_fd = -1, err;
-	int run_cnt_before, run_cnt_after;
-	struct bpf_link_info info;
-	__u32 info_len = sizeof(info);
-	__u64 timeout_time_ns;
 
-	/* create perf event */
 	memset(&attr, 0, sizeof(attr));
 	attr.size = sizeof(attr);
 	attr.type = PERF_TYPE_SOFTWARE;
 	attr.config = PERF_COUNT_SW_CPU_CLOCK;
 	attr.freq = 1;
 	attr.sample_freq = 1000;
-	pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
-	if (!ASSERT_GE(pfd, 0, "perf_fd"))
+	*pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+	if (!ASSERT_GE(*pfd, 0, "perf_fd"))
+		return -1;
+
+	*skel = test_perf_link__open_and_load();
+	if (!ASSERT_OK_PTR(*skel, "skel_load"))
+		return -1;
+
+	return 0;
+}
+
+void test_perf_link_detach(void)
+{
+	struct test_perf_link *skel = NULL;
+	int pfd = -1, link_fd = -1, err;
+	int run_cnt_before, run_cnt_after;
+	__u64 timeout_time_ns;
+
+	if (perf_link_setup(&skel, &pfd))
+		goto cleanup;
+
+	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd,
+				  BPF_PERF_EVENT, NULL);
+	if (!ASSERT_GE(link_fd, 0, "link_fd"))
 		goto cleanup;
 
-	skel = test_perf_link__open_and_load();
-	if (!ASSERT_OK_PTR(skel, "skel_load"))
+	/* ensure we get at least one perf_event prog execution */
+	timeout_time_ns = get_time_ns() + BURN_TIMEOUT_NS;
+	while (true) {
+		burn_cpu();
+		if (skel->bss->run_cnt > 0)
+			break;
+		if (!ASSERT_LT(get_time_ns(), timeout_time_ns, "run_cnt_timeout"))
+			goto cleanup;
+	}
+
+	/* detach via BPF_LINK_DETACH - BPF program should no longer be executed */
+	err = bpf_link_detach(link_fd);
+	if (!ASSERT_OK(err, "link_detach"))
+		goto cleanup;
+
+	/* make sure there are no stragglers */
+	kern_sync_rcu();
+
+	run_cnt_before = skel->bss->run_cnt;
+	burn_cpu();
+	run_cnt_after = skel->bss->run_cnt;
+
+	ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_detached");
+
+cleanup:
+	if (link_fd >= 0)
+		close(link_fd);
+	if (pfd >= 0)
+		close(pfd);
+	test_perf_link__destroy(skel);
+}
+
+void test_perf_link(void)
+{
+	struct test_perf_link *skel = NULL;
+	int pfd = -1, link_fd = -1, err;
+	int run_cnt_before, run_cnt_after;
+	struct bpf_link_info info;
+	__u32 info_len = sizeof(info);
+	__u64 timeout_time_ns;
+
+	if (perf_link_setup(&skel, &pfd))
 		goto cleanup;
 
 	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd,
-- 
2.53.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link
  2026-05-01 16:09 ` [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link Florian Lehner
@ 2026-05-01 16:52   ` bot+bpf-ci
  2026-05-01 16:59   ` sashiko-bot
  1 sibling, 0 replies; 7+ messages in thread
From: bot+bpf-ci @ 2026-05-01 16:52 UTC (permalink / raw)
  To: dev, bpf
  Cc: ast, daniel, john.fastabend, andrii, martin.lau, eddyz87, memxor,
	song, yonghong.song, jolsa, shuah, davem, kuba, hawk, sdf,
	sun.jian.kdev, dev, ast, andrii, daniel, martin.lau, eddyz87,
	yonghong.song, clm, ihor.solodrai

[-- Attachment #1: Type: text/plain, Size: 1957 bytes --]

> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index 3b1f0ba02f61..efd759970e10 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -3880,13 +3880,35 @@ struct bpf_perf_link {
>  	struct file *perf_file;
>  };
>
> +/* Serializes bpf_perf_link_release() against bpf_perf_link_fill_link_info()
> + * and bpf_perf_link_show_fdinfo() to prevent a use-after-free on perf_file
> + * when BPF_LINK_DETACH races with BPF_OBJ_GET_INFO_BY_FD or /proc fdinfo.
> + */
> +static DEFINE_MUTEX(bpf_perf_link_mutex);

Should this commit include a Fixes tag? The use-after-free race condition
being fixed appears to have been introduced in commit 1b715e1b0ec5
("bpf: Support ->fill_link_info for perf_event"), which added
bpf_perf_link_fill_link_info() and bpf_perf_link_show_fdinfo() that
access perf_link->perf_file without synchronization, creating a race
with bpf_perf_link_release().

Suggested: Fixes: 1b715e1b0ec5 ("bpf: Support ->fill_link_info for perf_event")

>
>  static void bpf_perf_link_release(struct bpf_link *link)
>  {
>  	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
> -	struct perf_event *event = perf_link->perf_file->private_data;
> +	struct perf_event *event;
> +	struct file *perf_file;
> +
> +	mutex_lock(&bpf_perf_link_mutex);
> +	perf_file = perf_link->perf_file;
> +	perf_link->perf_file = NULL;
> +	mutex_unlock(&bpf_perf_link_mutex);
>
> +	if (!perf_file)
> +		return;
> +
> +	event = perf_file->private_data;
>  	perf_event_free_bpf_prog(event);
> -	fput(perf_link->perf_file);
> +	fput(perf_file);
> +}
> +
> +static int bpf_perf_link_detach(struct bpf_link *link)
> +{
> +	bpf_perf_link_release(link);
> +	return 0;
>  }

[ ... ]


---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

CI run summary: https://github.com/kernel-patches/bpf/actions/runs/25222566755

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link
  2026-05-01 16:09 ` [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link Florian Lehner
  2026-05-01 16:52   ` bot+bpf-ci
@ 2026-05-01 16:59   ` sashiko-bot
  1 sibling, 0 replies; 7+ messages in thread
From: sashiko-bot @ 2026-05-01 16:59 UTC (permalink / raw)
  To: Florian Lehner; +Cc: bpf

Thank you for your contribution! Sashiko AI review found 1 potential issue(s) to consider:
- [High] Concurrent execution of BPF_LINK_DETACH and BPF_OBJ_GET_INFO_BY_FD (or /proc/pid/fdinfo reads) causes a NULL pointer dereference on event->prog.
--

commit 738bb49794369ff2bcc7c3d461300bd57e714cf5
Author: Florian Lehner <dev@der-flo.net>

bpf: Add LINK_DETACH support for perf link

This commit implements the .detach operation for bpf_perf_link, allowing
BPF_LINK_DETACH to release the perf event without destroying the link object.

> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c

[ ... ]

> @@ -4095,22 +4117,42 @@ static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
>  {
>  	struct bpf_perf_link *perf_link;
>  	const struct perf_event *event;
> +	struct file *perf_file;
> +	int ret;
>  
>  	perf_link = container_of(link, struct bpf_perf_link, link);
> -	event = perf_get_event(perf_link->perf_file);
> -	if (IS_ERR(event))
> +
> +	mutex_lock(&bpf_perf_link_mutex);
> +	perf_file = perf_link->perf_file;
> +	if (perf_file)
> +		get_file(perf_file);
> +	mutex_unlock(&bpf_perf_link_mutex);
> +
> +	if (!perf_file)
> +		return 0;
> +
> +	event = perf_get_event(perf_file);
> +	if (IS_ERR(event)) {
> +		fput(perf_file);
>  		return PTR_ERR(event);
> +	}
>  
>  	switch (event->prog->type) {

Can this result in a NULL pointer dereference on event->prog?

If a concurrent BPF_LINK_DETACH triggers bpf_perf_link_release(), it sets
perf_link->perf_file = NULL under the mutex, drops the mutex, and then calls
perf_event_free_bpf_prog(event).

If perf_event_free_bpf_prog() clears event->prog right after we drop the
mutex here but before we evaluate the switch statement, evaluating
event->prog->type could panic.

Would it be safer to use link->prog->type instead? The program reference in
link->prog is pinned for the lifetime of the link object.

-- 
Sashiko AI review · https://sashiko.dev/#/patchset/20260501160901.224134-1-dev@der-flo.net?part=1

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH for perf link
  2026-05-01 16:09 ` [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH " Florian Lehner
@ 2026-05-01 17:11   ` sashiko-bot
  2026-05-03 13:14   ` Jiri Olsa
  1 sibling, 0 replies; 7+ messages in thread
From: sashiko-bot @ 2026-05-01 17:11 UTC (permalink / raw)
  To: Florian Lehner; +Cc: bpf

Thank you for your contribution! Sashiko AI review found 1 potential issue(s) to consider:
- [Medium] The `test_perf_link_detach` test verification is brittle and likely to falsely pass, as a single call to `burn_cpu()` is too short to guarantee a perf event tick.
--

commit 80a1776d4374467cf1d037c1e22a1252e27c885c
Author: Florian Lehner <dev@der-flo.net>

selftests/bpf: Test LINK_DETACH for perf link

This commit adds a new test, test_perf_link_detach(), to verify the
BPF_LINK_DETACH syscall path for software perf events. It creates a link,
verifies execution, detaches it, and checks that the program is no longer
invoked.

> diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
> index 9e3a0d217af8..b75112c1b67d 100644
> --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
> +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c

[ ... ]

> +void test_perf_link_detach(void)
> +{
> +	struct test_perf_link *skel = NULL;
> +	int pfd = -1, link_fd = -1, err;
> +	int run_cnt_before, run_cnt_after;
> +	__u64 timeout_time_ns;

[ ... ]

> +	/* detach via BPF_LINK_DETACH - BPF program should no longer be executed */
> +	err = bpf_link_detach(link_fd);
> +	if (!ASSERT_OK(err, "link_detach"))
> +		goto cleanup;
> +
> +	/* make sure there are no stragglers */
> +	kern_sync_rcu();
> +
> +	run_cnt_before = skel->bss->run_cnt;
> +	burn_cpu();
> +	run_cnt_after = skel->bss->run_cnt;
> +
> +	ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_detached");

Is a single call to burn_cpu() sufficient to guarantee a perf event tick here?

The software CPU clock perf event is configured in perf_link_setup() to sample
at 1000 Hz, meaning it triggers approximately once every 1 millisecond of CPU
time.

The burn_cpu() function executes a fixed loop of 1,000,000 iterations. On
modern CPUs, this loop often takes significantly less than 1 millisecond to
complete.

Because a single burn_cpu() invocation takes less than the 1ms interval, is
it possible that no timer interrupt occurs during this window?

If so, run_cnt_after would equal run_cnt_before even if the detach operation
failed and the program was still attached, causing the test to falsely pass.
Could this be changed to use a time-based loop (similar to the initial
execution check) for several milliseconds to ensure correctness?

[ ... ]

-- 
Sashiko AI review · https://sashiko.dev/#/patchset/20260501160901.224134-1-dev@der-flo.net?part=2

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH for perf link
  2026-05-01 16:09 ` [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH " Florian Lehner
  2026-05-01 17:11   ` sashiko-bot
@ 2026-05-03 13:14   ` Jiri Olsa
  1 sibling, 0 replies; 7+ messages in thread
From: Jiri Olsa @ 2026-05-03 13:14 UTC (permalink / raw)
  To: Florian Lehner
  Cc: bpf, ast, daniel, john.fastabend, andrii, martin.lau, eddyz87,
	memxor, song, yonghong.song, shuah, davem, kuba, hawk, sdf,
	sun.jian.kdev

On Fri, May 01, 2026 at 06:09:01PM +0200, Florian Lehner wrote:
> Add test_perf_link_detach() to verify that the new LINK_DETACH support for
> BPF perf links works correctly. The test creates a link to a BPF program
> for a software perf event, confirms the program is executed, calls
> bpf_link_detach() to exercise the BPF_LINK_DETACH syscall path, and then
> verifies the program is no longer invoked after detach.
> 
> Signed-off-by: Florian Lehner <dev@der-flo.net>

hi,
you mentioned in here [1] the primary use case is to disable uprobes
temporarily.. I was expecting this as a selftest and curious to see
how that works.. could you add selftest for that?

thanks,
jirka


[1] https://lore.kernel.org/bpf/aJOhPoTLdYnZmHYA@der-flo.net/


> ---
>  .../selftests/bpf/prog_tests/perf_link.c      | 79 ++++++++++++++++---
>  1 file changed, 67 insertions(+), 12 deletions(-)
> 
> diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c
> index 9e3a0d217af8..b75112c1b67d 100644
> --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c
> +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c
> @@ -18,29 +18,84 @@ static void burn_cpu(void)
>  		barrier();
>  }
>  
> -void test_perf_link(void)
> +static int perf_link_setup(struct test_perf_link **skel, int *pfd)
>  {
> -	struct test_perf_link *skel = NULL;
>  	struct perf_event_attr attr;
> -	int pfd = -1, link_fd = -1, err;
> -	int run_cnt_before, run_cnt_after;
> -	struct bpf_link_info info;
> -	__u32 info_len = sizeof(info);
> -	__u64 timeout_time_ns;
>  
> -	/* create perf event */
>  	memset(&attr, 0, sizeof(attr));
>  	attr.size = sizeof(attr);
>  	attr.type = PERF_TYPE_SOFTWARE;
>  	attr.config = PERF_COUNT_SW_CPU_CLOCK;
>  	attr.freq = 1;
>  	attr.sample_freq = 1000;
> -	pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
> -	if (!ASSERT_GE(pfd, 0, "perf_fd"))
> +	*pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
> +	if (!ASSERT_GE(*pfd, 0, "perf_fd"))
> +		return -1;
> +
> +	*skel = test_perf_link__open_and_load();
> +	if (!ASSERT_OK_PTR(*skel, "skel_load"))
> +		return -1;
> +
> +	return 0;
> +}
> +
> +void test_perf_link_detach(void)
> +{
> +	struct test_perf_link *skel = NULL;
> +	int pfd = -1, link_fd = -1, err;
> +	int run_cnt_before, run_cnt_after;
> +	__u64 timeout_time_ns;
> +
> +	if (perf_link_setup(&skel, &pfd))
> +		goto cleanup;
> +
> +	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd,
> +				  BPF_PERF_EVENT, NULL);
> +	if (!ASSERT_GE(link_fd, 0, "link_fd"))
>  		goto cleanup;
>  
> -	skel = test_perf_link__open_and_load();
> -	if (!ASSERT_OK_PTR(skel, "skel_load"))
> +	/* ensure we get at least one perf_event prog execution */
> +	timeout_time_ns = get_time_ns() + BURN_TIMEOUT_NS;
> +	while (true) {
> +		burn_cpu();
> +		if (skel->bss->run_cnt > 0)
> +			break;
> +		if (!ASSERT_LT(get_time_ns(), timeout_time_ns, "run_cnt_timeout"))
> +			goto cleanup;
> +	}
> +
> +	/* detach via BPF_LINK_DETACH - BPF program should no longer be executed */
> +	err = bpf_link_detach(link_fd);
> +	if (!ASSERT_OK(err, "link_detach"))
> +		goto cleanup;
> +
> +	/* make sure there are no stragglers */
> +	kern_sync_rcu();
> +
> +	run_cnt_before = skel->bss->run_cnt;
> +	burn_cpu();
> +	run_cnt_after = skel->bss->run_cnt;
> +
> +	ASSERT_EQ(run_cnt_before, run_cnt_after, "run_cnt_detached");
> +
> +cleanup:
> +	if (link_fd >= 0)
> +		close(link_fd);
> +	if (pfd >= 0)
> +		close(pfd);
> +	test_perf_link__destroy(skel);
> +}
> +
> +void test_perf_link(void)
> +{
> +	struct test_perf_link *skel = NULL;
> +	int pfd = -1, link_fd = -1, err;
> +	int run_cnt_before, run_cnt_after;
> +	struct bpf_link_info info;
> +	__u32 info_len = sizeof(info);
> +	__u64 timeout_time_ns;
> +
> +	if (perf_link_setup(&skel, &pfd))
>  		goto cleanup;
>  
>  	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handler), pfd,
> -- 
> 2.53.0
> 

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2026-05-03 13:14 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-01 16:08 [PATCH bpf-next 0/2 v3] bpf: Add LINK_DETACH for perf links Florian Lehner
2026-05-01 16:09 ` [PATCH bpf-next 1/2 v3] bpf: Add LINK_DETACH support for perf link Florian Lehner
2026-05-01 16:52   ` bot+bpf-ci
2026-05-01 16:59   ` sashiko-bot
2026-05-01 16:09 ` [PATCH bpf-next 2/2 v3] selftests/bpf: Test LINK_DETACH " Florian Lehner
2026-05-01 17:11   ` sashiko-bot
2026-05-03 13:14   ` Jiri Olsa

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox