* [PATCH] perf bench: Add -t/--threads option to perf bench mem mmap
@ 2025-12-07 8:57 Namhyung Kim
2025-12-09 13:01 ` James Clark
0 siblings, 1 reply; 2+ messages in thread
From: Namhyung Kim @ 2025-12-07 8:57 UTC (permalink / raw)
To: Arnaldo Carvalho de Melo, Ian Rogers, James Clark
Cc: Jiri Olsa, Adrian Hunter, Peter Zijlstra, Ingo Molnar, LKML,
linux-perf-users, Ankur Arora
So that it can measure the overhead of mmap_lock and/or per-VMA lock
contention.
$ perf bench mem mmap -f demand -l 1000 -t 1
# Running 'mem/mmap' benchmark:
# function 'demand' (Demand loaded mmap())
# Copying 1MB bytes ...
2.914503 GB/sec
$ perf bench mem mmap -f demand -l 1000 -t 2
# Running 'mem/mmap' benchmark:
# function 'demand' (Demand loaded mmap())
# Copying 1MB bytes ...
888.769991 MB/sec
$ perf bench mem mmap -f demand -l 1000 -t 3
# Running 'mem/mmap' benchmark:
# function 'demand' (Demand loaded mmap())
# Copying 1MB bytes ...
757.658220 MB/sec
$ perf bench mem mmap -f demand -l 1000 -t 4
# Running 'mem/mmap' benchmark:
# function 'demand' (Demand loaded mmap())
# Copying 1MB bytes ...
316.410713 MB/sec
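Note that timeval2double() divides the accumulated time by the thread
count, so each figure above is best read as a per-thread rate rather
than an aggregate.  Either way, the combined throughput drops as
threads are added: with two threads it is no more than ~1.7 GB/sec
(2 x 888.77 MB/sec), well below the ~2.91 GB/sec single-thread result,
which is the mmap_lock / per-VMA lock contention this option is meant
to expose.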
Cc: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/Documentation/perf-bench.txt | 4 ++
tools/perf/bench/mem-functions.c | 74 +++++++++++++++++++++----
2 files changed, 67 insertions(+), 11 deletions(-)
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 1160224cb718392d..c5913cf59c988421 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -274,6 +274,10 @@ Repeat mmap() invocation this number of times.
--cycles::
Use perf's cpu-cycles event instead of gettimeofday syscall.
+-t::
+--threads=<NUM>::
+Create multiple threads to call mmap/munmap concurrently.
+
SUITES FOR 'numa'
~~~~~~~~~~~~~~~~~
*mem*::
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index 2908a3a796c932d0..e7e7d0b41fc7720f 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -26,6 +26,7 @@
#include <errno.h>
#include <linux/time64.h>
#include <linux/log2.h>
+#include <pthread.h>
#define K 1024
@@ -41,6 +42,7 @@ static unsigned int nr_loops = 1;
static bool use_cycles;
static int cycles_fd;
static unsigned int seed;
+static unsigned int nr_threads = 1;
static const struct option bench_common_options[] = {
OPT_STRING('s', "size", &size_str, "1MB",
@@ -174,7 +176,7 @@ static void clock_accum(union bench_clock *a, union bench_clock *b)
static double timeval2double(struct timeval *ts)
{
- return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC;
+ return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC / nr_threads;
}
#define print_bps(x) do { \
@@ -494,16 +496,27 @@ static void mmap_page_touch(void *dst, size_t size, unsigned int page_shift, boo
}
}
-static int do_mmap(const struct function *r, struct bench_params *p,
- void *src __maybe_unused, void *dst __maybe_unused,
- union bench_clock *accum)
+struct mmap_data {
+ pthread_t id;
+ const struct function *func;
+ struct bench_params *params;
+ union bench_clock result;
+ unsigned int seed;
+ int error;
+};
+
+static void *do_mmap_thread(void *arg)
{
+ struct mmap_data *data = arg;
+ const struct function *r = data->func;
+ struct bench_params *p = data->params;
union bench_clock start, end, diff;
mmap_op_t fn = r->fn.mmap_op;
bool populate = strcmp(r->name, "populate") == 0;
+ void *dst;
- if (p->seed)
- srand(p->seed);
+ if (data->seed)
+ srand(data->seed);
for (unsigned int i = 0; i < p->nr_loops; i++) {
clock_get(&start);
@@ -514,16 +527,53 @@ static int do_mmap(const struct function *r, struct bench_params *p,
fn(dst, p->size, p->page_shift, p->seed);
clock_get(&end);
diff = clock_diff(&start, &end);
- clock_accum(accum, &diff);
+ clock_accum(&data->result, &diff);
bench_munmap(dst, p->size);
}
- return 0;
+ return data;
out:
- printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
- p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
- return -1;
+ data->error = -ENOMEM;
+ return NULL;
+}
+
+static int do_mmap(const struct function *r, struct bench_params *p,
+ void *src __maybe_unused, void *dst __maybe_unused,
+ union bench_clock *accum)
+{
+ struct mmap_data *data;
+ int error = 0;
+
+ data = calloc(nr_threads, sizeof(*data));
+ if (!data) {
+ printf("# Failed to allocate thread resources\n");
+ return -1;
+ }
+
+ for (unsigned int i = 0; i < nr_threads; i++) {
+ data[i].func = r;
+ data[i].params = p;
+ if (p->seed)
+ data[i].seed = p->seed + i;
+
+ if (pthread_create(&data[i].id, NULL, do_mmap_thread, &data[i]) < 0)
+ data[i].error = -errno;
+ }
+
+ for (unsigned int i = 0; i < nr_threads; i++) {
+ pthread_join(data[i].id, NULL);
+
+ clock_accum(accum, &data[i].result);
+ error |= data[i].error;
+ }
+ free(data);
+
+ if (error) {
+ printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
+ p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
+ }
+ return error ? -1 : 0;
}
static const char * const bench_mem_mmap_usage[] = {
@@ -548,6 +598,8 @@ int bench_mem_mmap(int argc, const char **argv)
static const struct option bench_mmap_options[] = {
OPT_UINTEGER('r', "randomize", &seed,
"Seed to randomize page access offset."),
+ OPT_UINTEGER('t', "threads", &nr_threads,
+ "Number of threads to run concurrently (default: 1)."),
OPT_PARENT(bench_common_options),
OPT_END()
};
--
2.52.0.223.gf5cc29aaa4-goog
* Re: [PATCH] perf bench: Add -t/--threads option to perf bench mem mmap
2025-12-07 8:57 [PATCH] perf bench: Add -t/--threads option to perf bench mem mmap Namhyung Kim
@ 2025-12-09 13:01 ` James Clark
0 siblings, 0 replies; 2+ messages in thread
From: James Clark @ 2025-12-09 13:01 UTC (permalink / raw)
To: Namhyung Kim, Arnaldo Carvalho de Melo, Ian Rogers
Cc: Jiri Olsa, Adrian Hunter, Peter Zijlstra, Ingo Molnar, LKML,
linux-perf-users, Ankur Arora
On 07/12/2025 8:57 am, Namhyung Kim wrote:
> So that it can measure the overhead of mmap_lock and/or per-VMA lock
> contention.
>
> $ perf bench mem mmap -f demand -l 1000 -t 1
> # Running 'mem/mmap' benchmark:
> # function 'demand' (Demand loaded mmap())
> # Copying 1MB bytes ...
>
> 2.914503 GB/sec
>
> $ perf bench mem mmap -f demand -l 1000 -t 2
> # Running 'mem/mmap' benchmark:
> # function 'demand' (Demand loaded mmap())
> # Copying 1MB bytes ...
>
> 888.769991 MB/sec
>
> $ perf bench mem mmap -f demand -l 1000 -t 3
> # Running 'mem/mmap' benchmark:
> # function 'demand' (Demand loaded mmap())
> # Copying 1MB bytes ...
>
> 757.658220 MB/sec
>
> $ perf bench mem mmap -f demand -l 1000 -t 4
> # Running 'mem/mmap' benchmark:
> # function 'demand' (Demand loaded mmap())
> # Copying 1MB bytes ...
>
> 316.410713 MB/sec
Should this now say "MB/sec per thread" for nr_threads > 1? I think it
could be interpreted either way without a label, but I see you divided
by nr_threads in timeval2double().
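To spell out what I mean, here is a rough model of the arithmetic (just
a sketch, not the actual perf code).  It assumes the accumulated clock
is the sum of each thread's own elapsed time, which is what do_mmap()
now builds up from the per-thread results, and that the byte count used
for the final division is the per-thread total (size * nr_loops); I
haven't checked the latter.

  #include <stdio.h>

  static double reported_bps(double bytes, double sum_elapsed,
                             unsigned int nr_threads)
  {
          /* timeval2double() now divides the summed time by nr_threads... */
          double avg_elapsed = sum_elapsed / nr_threads;

          /* ...so this is an average per-thread rate, not the aggregate. */
          return bytes / avg_elapsed;
  }

  int main(void)
  {
          /* Two threads, each doing 1000 x 1MB in ~1.125s of own time. */
          double bytes = 1000.0 * 1024 * 1024;
          double per_thread = reported_bps(bytes, 2 * 1.125, 2);

          printf("per thread: %.0f MB/sec, aggregate: %.0f MB/sec\n",
                 per_thread / (1024 * 1024), 2 * per_thread / (1024 * 1024));
          return 0;
  }

Plugging in numbers close to the -t 2 run above gives roughly the
888 MB/sec you report, which is why I read it as a per-thread figure.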
>
> Cc: Ankur Arora <ankur.a.arora@oracle.com>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> ---
> tools/perf/Documentation/perf-bench.txt | 4 ++
> tools/perf/bench/mem-functions.c | 74 +++++++++++++++++++++----
> 2 files changed, 67 insertions(+), 11 deletions(-)
>
> diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
> index 1160224cb718392d..c5913cf59c988421 100644
> --- a/tools/perf/Documentation/perf-bench.txt
> +++ b/tools/perf/Documentation/perf-bench.txt
> @@ -274,6 +274,10 @@ Repeat mmap() invocation this number of times.
> --cycles::
> Use perf's cpu-cycles event instead of gettimeofday syscall.
>
> +-t::
> +--threads=<NUM>::
> +Create multiple threads to call mmap/munmap concurrently.
> +
> SUITES FOR 'numa'
> ~~~~~~~~~~~~~~~~~
> *mem*::
> diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
> index 2908a3a796c932d0..e7e7d0b41fc7720f 100644
> --- a/tools/perf/bench/mem-functions.c
> +++ b/tools/perf/bench/mem-functions.c
> @@ -26,6 +26,7 @@
> #include <errno.h>
> #include <linux/time64.h>
> #include <linux/log2.h>
> +#include <pthread.h>
>
> #define K 1024
>
> @@ -41,6 +42,7 @@ static unsigned int nr_loops = 1;
> static bool use_cycles;
> static int cycles_fd;
> static unsigned int seed;
> +static unsigned int nr_threads = 1;
>
> static const struct option bench_common_options[] = {
> OPT_STRING('s', "size", &size_str, "1MB",
> @@ -174,7 +176,7 @@ static void clock_accum(union bench_clock *a, union bench_clock *b)
>
> static double timeval2double(struct timeval *ts)
> {
> - return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC;
> + return (double)ts->tv_sec + (double)ts->tv_usec / (double)USEC_PER_SEC / nr_threads;
> }
>
> #define print_bps(x) do { \
> @@ -494,16 +496,27 @@ static void mmap_page_touch(void *dst, size_t size, unsigned int page_shift, boo
> }
> }
>
> -static int do_mmap(const struct function *r, struct bench_params *p,
> - void *src __maybe_unused, void *dst __maybe_unused,
> - union bench_clock *accum)
> +struct mmap_data {
> + pthread_t id;
> + const struct function *func;
> + struct bench_params *params;
> + union bench_clock result;
> + unsigned int seed;
> + int error;
> +};
> +
> +static void *do_mmap_thread(void *arg)
> {
> + struct mmap_data *data = arg;
> + const struct function *r = data->func;
> + struct bench_params *p = data->params;
> union bench_clock start, end, diff;
> mmap_op_t fn = r->fn.mmap_op;
> bool populate = strcmp(r->name, "populate") == 0;
> + void *dst;
>
> - if (p->seed)
> - srand(p->seed);
> + if (data->seed)
> + srand(data->seed);
>
> for (unsigned int i = 0; i < p->nr_loops; i++) {
> clock_get(&start);
> @@ -514,16 +527,53 @@ static int do_mmap(const struct function *r, struct bench_params *p,
> fn(dst, p->size, p->page_shift, p->seed);
> clock_get(&end);
> diff = clock_diff(&start, &end);
> - clock_accum(accum, &diff);
> + clock_accum(&data->result, &diff);
>
> bench_munmap(dst, p->size);
> }
>
> - return 0;
> + return data;
> out:
> - printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
> - p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
> - return -1;
> + data->error = -ENOMEM;
> + return NULL;
> +}
> +
> +static int do_mmap(const struct function *r, struct bench_params *p,
> + void *src __maybe_unused, void *dst __maybe_unused,
> + union bench_clock *accum)
> +{
> + struct mmap_data *data;
> + int error = 0;
> +
> + data = calloc(nr_threads, sizeof(*data));
> + if (!data) {
> + printf("# Failed to allocate thread resources\n");
> + return -1;
> + }
> +
> + for (unsigned int i = 0; i < nr_threads; i++) {
> + data[i].func = r;
> + data[i].params = p;
> + if (p->seed)
> + data[i].seed = p->seed + i;
> +
> + if (pthread_create(&data[i].id, NULL, do_mmap_thread, &data[i]) < 0)
> + data[i].error = -errno;
> + }
> +
> + for (unsigned int i = 0; i < nr_threads; i++) {
> + pthread_join(data[i].id, NULL);
> +
> + clock_accum(accum, &data[i].result);
> + error |= data[i].error;
> + }
> + free(data);
> +
> + if (error) {
> + printf("# Memory allocation failed - maybe size (%s) %s?\n", size_str,
> + p->page_shift != PAGE_SHIFT_4KB ? "has insufficient hugepages" : "is too large");
> + }
> + return error ? -1 : 0;
> }
>
> static const char * const bench_mem_mmap_usage[] = {
> @@ -548,6 +598,8 @@ int bench_mem_mmap(int argc, const char **argv)
> static const struct option bench_mmap_options[] = {
> OPT_UINTEGER('r', "randomize", &seed,
> "Seed to randomize page access offset."),
> + OPT_UINTEGER('t', "threads", &nr_threads,
> + "Number of threads to run concurrently (default: 1)."),
> OPT_PARENT(bench_common_options),
> OPT_END()
> };