* [PATCH 0/3] libtracefs utest: Fixes and new tests
@ 2024-10-17 20:03 Steven Rostedt
  2024-10-17 20:03 ` [PATCH 1/3] libtracefs utest: Add test to check handling multiple dynamic events Steven Rostedt
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Steven Rostedt @ 2024-10-17 20:03 UTC (permalink / raw)
  To: linux-trace-devel; +Cc: Steven Rostedt (Google)

From: "Steven Rostedt (Google)" <rostedt@goodmis.org>

- Add a test that exercises tracefs_dynevent_get_all() more thoroughly

- Fix the min percent testing in case of large sub-buffers

- Do not test more events than can be held in the ring buffer.

Steven Rostedt (Google) (3):
  libtracefs utest: Add test to check handling multiple dynamic events
  libtracefs utest: Fix min percent test
  libtracefs utest: Do not test more events than what the ring buffer
    can hold

 utest/tracefs-utest.c | 184 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 183 insertions(+), 1 deletion(-)

-- 
2.45.2



* [PATCH 1/3] libtracefs utest: Add test to check handling multiple dynamic events
  2024-10-17 20:03 [PATCH 0/3] libtracefs utest: Fixes and new tests Steven Rostedt
@ 2024-10-17 20:03 ` Steven Rostedt
  2024-10-17 20:03 ` [PATCH 2/3] libtracefs utest: Fix min percent test Steven Rostedt
  2024-10-17 20:03 ` [PATCH 3/3] libtracefs utest: Do not test more events than what the ring buffer can hold Steven Rostedt
  2 siblings, 0 replies; 4+ messages in thread
From: Steven Rostedt @ 2024-10-17 20:03 UTC (permalink / raw)
  To: linux-trace-devel; +Cc: Steven Rostedt (Google)

From: "Steven Rostedt (Google)" <rostedt@goodmis.org>

Add a new "mutli probes" test that will create kprobes, kretprobes,
uprobes, uretprobes, eprobes and synthetic events and make sure that the
counters of those events are working properly.
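
Below is a minimal sketch, not part of the patch, of what the count
checks boil down to: list the dynamic events of the requested types
with tracefs_dynevent_get_all() and count the entries of the
NULL-terminated array it returns (the helper name count_dynevents()
is made up for illustration):

  #include <stdio.h>
  #include <tracefs.h>

  /* Count the currently defined dynamic events matching "types" */
  static int count_dynevents(unsigned int types)
  {
          struct tracefs_dynevent **devents;
          int i = 0;

          /* NULL system matches events from any system */
          devents = tracefs_dynevent_get_all(types, NULL);
          if (!devents)
                  return 0;
          while (devents[i])
                  i++;
          tracefs_dynevent_list_free(devents);
          return i;
  }

  int main(void)
  {
          printf("kprobes + kretprobes: %d\n",
                 count_dynevents(TRACEFS_DYNEVENT_KPROBE |
                                 TRACEFS_DYNEVENT_KRETPROBE));
          return 0;
  }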

Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 utest/tracefs-utest.c | 162 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 162 insertions(+)

diff --git a/utest/tracefs-utest.c b/utest/tracefs-utest.c
index 5bfd665b9990..742f4546bef0 100644
--- a/utest/tracefs-utest.c
+++ b/utest/tracefs-utest.c
@@ -2574,6 +2574,167 @@ static void test_uprobes(void)
 	test_uprobes_instance(test_instance);
 }
 
+static void test_multi_probes_instance(struct tracefs_instance *instance)
+{
+	struct probe_test tests[] = {
+		{ TRACEFS_DYNEVENT_UPROBE, "p", "utest", "utest_u", NULL, "arg1=$stack2" },
+		{ TRACEFS_DYNEVENT_URETPROBE, "r", "utest", "utest_r", NULL, "arg1=$retval" },
+		{ TRACEFS_DYNEVENT_EPROBE, "e", NULL, "sopen_in", "syscalls.sys_enter_openat",
+					   "file=+0($filename):ustring" },
+		{ TRACEFS_DYNEVENT_EPROBE, "e", "etest", "sopen_out", "syscalls.sys_exit_openat",
+					   "res=$ret:u64" },
+		{ TRACEFS_DYNEVENT_KPROBE, "p", NULL, "mkdir", "do_mkdirat", "path=+u0($arg2):ustring" },
+		{ TRACEFS_DYNEVENT_KPROBE, "p", NULL, "close", "close_fd", NULL },
+		{ TRACEFS_DYNEVENT_KPROBE, "p", "ptest", "open2", "do_sys_openat2",
+				  "file=+u0($arg2):ustring flags=+0($arg3):x64" },
+		{ TRACEFS_DYNEVENT_KRETPROBE, NULL, NULL, "retopen", "do_sys_openat2", "ret=$retval" },
+		{ TRACEFS_DYNEVENT_KRETPROBE, NULL, NULL, NULL, "do_sys_open", "ret=$retval" },
+	};
+	struct test_synth sevents[] = {
+		{"synth_1", "sched", "sched_waking", "sched", "sched_switch", "pid", "next_pid", "pid_match"},
+		{"synth_2", "syscalls", "sys_enter_openat2", "syscalls", "sys_exit_openat2", "__syscall_nr", "__syscall_nr", "nr_match"},
+	};
+	int sevents_count = sizeof(sevents) / sizeof((sevents)[0]);
+	int count = sizeof(tests) / sizeof((tests)[0]);
+	struct tracefs_dynevent **probes;
+	struct tracefs_synth **synth;
+	struct tep_handle *tep;
+	char self[PATH_MAX] = { 0 };
+	char *target = NULL;
+	char *tsys, *tevent;
+	char *tmp, *sav;
+	/* The below matches the tests and sevents arrays */
+	const int uprobes_cnt = 1;
+	const int uretprobes_cnt = 1;
+	const int eprobes_cnt = 2;
+	const int kprobes_cnt = 3;
+	const int kretprobes_cnt = 2;
+	int ret;
+	int i;
+
+	synth = calloc(sevents_count + 1, sizeof(*synth));
+
+	tep = tracefs_local_events(NULL);
+	CU_TEST(tep != NULL);
+
+	destroy_dynevents(TRACEFS_DYNEVENT_ALL);
+
+	for (i = 0; i < sevents_count; i++) {
+		synth[i] = tracefs_synth_alloc(tep,  sevents[i].name,
+					       sevents[i].start_system, sevents[i].start_event,
+					       sevents[i].end_system, sevents[i].end_event,
+					       sevents[i].start_match_field, sevents[i].end_match_field,
+					       sevents[i].match_name);
+		CU_TEST(synth[i] != NULL);
+	}
+
+	probes = calloc(count + 1, sizeof(*probes));
+	CU_TEST(probes != NULL);
+	CU_TEST(readlink("/proc/self/exe", self, sizeof(self)) > 0);
+	CU_TEST(asprintf(&target, "%s:0x%0*llx", self, (int)(sizeof(void *) * 2), FOFFSET) > 0);
+
+	for (i = 0; i < count; i++) {
+		switch (tests[i].type) {
+		case TRACEFS_DYNEVENT_UPROBE:
+			tests[i].address = target;
+			probes[i] = tracefs_uprobe_alloc(tests[i].system, tests[i].event,
+							   self, FOFFSET, tests[i].format);
+			break;
+		case TRACEFS_DYNEVENT_URETPROBE:
+			tests[i].address = target;
+			probes[i] = tracefs_uretprobe_alloc(tests[i].system, tests[i].event,
+							      self, FOFFSET, tests[i].format);
+			break;
+		case TRACEFS_DYNEVENT_KPROBE:
+			probes[i] = tracefs_kprobe_alloc(tests[i].system, tests[i].event,
+						  tests[i].address, tests[i].format);
+			break;
+		case TRACEFS_DYNEVENT_KRETPROBE:
+			probes[i] = tracefs_kretprobe_alloc(tests[i].system, tests[i].event,
+							    tests[i].address, tests[i].format, 0);
+			break;
+		case TRACEFS_DYNEVENT_EPROBE:
+			tmp = strdup(tests[i].address);
+			tsys = strtok_r(tmp, "./", &sav);
+			tevent = strtok_r(NULL, "", &sav);
+			probes[i] = tracefs_eprobe_alloc(tests[i].system, tests[i].event,
+							   tsys, tevent, tests[i].format);
+			free(tmp);
+			break;
+		default:
+			CU_TEST(0);
+			break;
+		}
+
+		CU_TEST(probes[i] != NULL);
+	}
+	probes[i] = NULL;
+
+	get_dynevents_check(TRACEFS_DYNEVENT_ALL, 0);
+	CU_TEST(check_probes(tests, count, probes, false, instance, tep));
+
+	for (i = 0; i < count; i++) {
+		CU_TEST(tracefs_dynevent_create(probes[i]) == 0);
+	}
+
+	get_dynevents_check(TRACEFS_DYNEVENT_ALL, count);
+
+	for (i = 0; i < sevents_count; i++) {
+		CU_TEST(tracefs_synth_create(synth[i]) == 0);
+	}
+
+	get_dynevents_check(TRACEFS_DYNEVENT_ALL, count + sevents_count);
+
+	CU_TEST(check_probes(tests, count, probes, true, instance, tep));
+
+	get_dynevents_check(TRACEFS_DYNEVENT_UPROBE, uprobes_cnt);
+	get_dynevents_check(TRACEFS_DYNEVENT_URETPROBE, uretprobes_cnt);
+	get_dynevents_check(TRACEFS_DYNEVENT_UPROBE |
+			    TRACEFS_DYNEVENT_URETPROBE, uprobes_cnt + uretprobes_cnt);
+
+	get_dynevents_check(TRACEFS_DYNEVENT_KPROBE, kprobes_cnt);
+	get_dynevents_check(TRACEFS_DYNEVENT_KRETPROBE, kretprobes_cnt);
+	get_dynevents_check(TRACEFS_DYNEVENT_KPROBE |
+			    TRACEFS_DYNEVENT_KRETPROBE, kprobes_cnt + kretprobes_cnt);
+
+	get_dynevents_check(TRACEFS_DYNEVENT_EPROBE, eprobes_cnt);
+
+	get_dynevents_check(TRACEFS_DYNEVENT_SYNTH, sevents_count);
+
+	get_dynevents_check(TRACEFS_DYNEVENT_EPROBE | TRACEFS_DYNEVENT_SYNTH,
+			    eprobes_cnt + sevents_count);
+
+
+	for (i = 0; i < count; i++) {
+		CU_TEST(tracefs_dynevent_destroy(probes[i], false) == 0);
+	}
+
+	get_dynevents_check(TRACEFS_DYNEVENT_ALL, sevents_count);
+
+	for (i = 0; i < sevents_count; i++) {
+		ret = tracefs_synth_destroy(synth[i]);
+		CU_TEST(ret == 0);
+	}
+
+	get_dynevents_check(TRACEFS_DYNEVENT_ALL, 0);
+
+	for (i = 0; i < count; i++)
+		tracefs_dynevent_free(probes[i]);
+
+	for (i = 0; i < sevents_count; i++)
+		tracefs_synth_free(synth[i]);
+
+	free(synth);
+	free(probes);
+	free(target);
+	tep_free(tep);
+}
+
+static void test_multi_probes(void)
+{
+	test_multi_probes_instance(test_instance);
+}
+
 static void test_instance_file(void)
 {
 	struct tracefs_instance *instance = NULL;
@@ -3693,4 +3854,5 @@ void test_tracefs_lib(void)
 	CU_add_test(suite, "synthetic events", test_synthetic);
 	CU_add_test(suite, "eprobes", test_eprobes);
 	CU_add_test(suite, "uprobes", test_uprobes);
+	CU_add_test(suite, "multi probe test", test_multi_probes);
 }
-- 
2.45.2



* [PATCH 2/3] libtracefs utest: Fix min percent test
  2024-10-17 20:03 [PATCH 0/3] libtracefs utest: Fixes and new tests Steven Rostedt
  2024-10-17 20:03 ` [PATCH 1/3] libtracefs utest: Add test to check handling multiple dynamic events Steven Rostedt
@ 2024-10-17 20:03 ` Steven Rostedt
  2024-10-17 20:03 ` [PATCH 3/3] libtracefs utest: Do not test more events than what the ring buffer can hold Steven Rostedt
  2 siblings, 0 replies; 4+ messages in thread
From: Steven Rostedt @ 2024-10-17 20:03 UTC (permalink / raw)
  To: linux-trace-devel; +Cc: Steven Rostedt (Google), Adrien Nader

From: "Steven Rostedt (Google)" <rostedt@goodmis.org>

On PowerPC 64, which has 64K pages, the large page size throws off some
of the calculations used by the tests. For instance, 1% of the ring
buffer may be no more than a single page, so testing 1% and then
subtracting the number of events per page leads to a negative number,
which obviously fails.

Take into account that the sub-buffer may be very large, and compute a
minimum percent to use when a single sub-buffer covers more than 1% of
the ring buffer.
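
As a quick illustration (not part of the patch) of the ceiling division
used to pick the minimum percent, assume a 64K sub-buffer and a
hypothetical 1408K per-CPU buffer, i.e. 22 sub-buffers:

  int nr_subbufs = 22;  /* hypothetical: 1408K buffer with 64K sub-buffers */
  /* smallest whole percent that still covers at least one sub-buffer */
  int min_percent = (100 + (nr_subbufs - 1)) / nr_subbufs;  /* == 5 */

so a requested 1% gets bumped up to 5%, which covers at least one full
sub-buffer.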

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=219358
Reported-by: Adrien Nader <adrien@notk.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 utest/tracefs-utest.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/utest/tracefs-utest.c b/utest/tracefs-utest.c
index 742f4546bef0..b5095a18bb16 100644
--- a/utest/tracefs-utest.c
+++ b/utest/tracefs-utest.c
@@ -1340,6 +1340,17 @@ static void test_cpu_read_buf_percent(struct test_cpu_data *data, int percent)
 
 	/* For percent == 0, just test for any data */
 	if (percent) {
+		int min_percent;
+
+		/*
+		 * For architectures like PowerPC with 64K PAGE_SIZE and thus
+		 * large sub buffers, where we will not have over 100 sub buffers
+		 * percent must at least cover more than 1 sub buffer.
+		 */
+		min_percent = (100 + (data->nr_subbufs - 1)) / data->nr_subbufs;
+		if (percent < min_percent)
+			percent = min_percent;
+
 		expect = data->nr_subbufs * data->events_per_buf * percent / 100;
 
 		/* Add just under the percent */
-- 
2.45.2



* [PATCH 3/3] libtracefs utest: Do not test more events than what the ring buffer can hold
  2024-10-17 20:03 [PATCH 0/3] libtracefs utest: Fixes and new tests Steven Rostedt
  2024-10-17 20:03 ` [PATCH 1/3] libtracefs utest: Add test to check handling multiple dynamic events Steven Rostedt
  2024-10-17 20:03 ` [PATCH 2/3] libtracefs utest: Fix min percent test Steven Rostedt
@ 2024-10-17 20:03 ` Steven Rostedt
  2 siblings, 0 replies; 4+ messages in thread
From: Steven Rostedt @ 2024-10-17 20:03 UTC (permalink / raw)
  To: linux-trace-devel; +Cc: Steven Rostedt (Google), Adrien Nader

From: "Steven Rostedt (Google)" <rostedt@goodmis.org>

The test_instance_trace_cpu_read() test reads events_per_buf * 50
events. But on PowerPC64, which has 64K sub-buffers, there may not be
50 sub-buffers in a per-CPU ring buffer. That means the ring buffer
will overflow and the test will read fewer events than expected.

Check to make sure that the number of events to test will not exceed
what a single per-CPU ring buffer can hold.
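
A back-of-the-envelope example (the numbers are hypothetical, not from
the patch): with 64K sub-buffers and a 1408K per-CPU buffer there are
only 22 sub-buffers, so 50 sub-buffers worth of events cannot fit:

  int subbuf_size = 64 * 1024;                 /* 64K sub-buffer (PowerPC64) */
  int buffer_size = 1408 * 1024;               /* hypothetical per-CPU size */
  int nr_subbufs = buffer_size / subbuf_size;  /* == 22, well below 50 */

which is why the test now caps the event count based on the actual
buffer size reported by tracefs_instance_get_buffer_size().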

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=219358
Reported-by: Adrien Nader <adrien@notk.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 utest/tracefs-utest.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/utest/tracefs-utest.c b/utest/tracefs-utest.c
index b5095a18bb16..46fa31c88cf6 100644
--- a/utest/tracefs-utest.c
+++ b/utest/tracefs-utest.c
@@ -1270,6 +1270,8 @@ static void test_cpu_read(struct test_cpu_data *data, int expect)
 static void test_instance_trace_cpu_read(struct tracefs_instance *instance, bool map)
 {
 	struct test_cpu_data data;
+	size_t buffer_size;
+	int big_num = 50;
 
 	if (setup_trace_cpu(instance, &data, true, map))
 		return;
@@ -1278,7 +1280,14 @@ static void test_instance_trace_cpu_read(struct tracefs_instance *instance, bool
 	test_cpu_read(&data, data.events_per_buf / 2);
 	test_cpu_read(&data, data.events_per_buf);
 	test_cpu_read(&data, data.events_per_buf + 1);
-	test_cpu_read(&data, data.events_per_buf * 50);
+
+	buffer_size = tracefs_instance_get_buffer_size(instance, 0) * 1024;
+	if (data.events_per_buf * big_num > (buffer_size - data.events_per_buf)) {
+		big_num = (buffer_size / data.events_per_buf);
+		big_num -= data.events_per_buf * 2;
+		CU_TEST(big_num > 0);
+	}
+	test_cpu_read(&data, data.events_per_buf * big_num);
 
 	shutdown_trace_cpu(&data);
 }
-- 
2.45.2


