* [PATCH RESEND v4 01/11] perf: Add print_separator to util
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 02/11] tools/lib: Add list_is_first() Swapnil Sapkal
` (9 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
Add print_separator to util.c and use it wherever necessary.
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/perf/builtin-kwork.c | 13 ++++---------
tools/perf/util/util.c | 6 ++++++
tools/perf/util/util.h | 2 ++
3 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
index d2e08de5976d..842f59ff85ac 100644
--- a/tools/perf/builtin-kwork.c
+++ b/tools/perf/builtin-kwork.c
@@ -1340,11 +1340,6 @@ static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
[KWORK_CLASS_SCHED] = &kwork_sched,
};
-static void print_separator(int len)
-{
- printf(" %.*s\n", len, graph_dotted_line);
-}
-
static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
{
int ret = 0;
@@ -1458,7 +1453,7 @@ static int report_print_header(struct perf_kwork *kwork)
}
printf("\n");
- print_separator(ret);
+ print_separator(ret, "", 0);
return ret;
}
@@ -1633,7 +1628,7 @@ static void top_print_header(struct perf_kwork *kwork __maybe_unused)
PRINT_RUNTIME_HEADER_WIDTH + RPINT_DECIMAL_WIDTH, "RUNTIME",
PRINT_TASK_NAME_WIDTH, "COMMAND");
printf("\n ");
- print_separator(ret);
+ print_separator(ret, "", 0);
}
static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
@@ -1933,11 +1928,11 @@ static int perf_kwork__report(struct perf_kwork *kwork)
}
next = rb_next(next);
}
- print_separator(ret);
+ print_separator(ret, "", 0);
if (kwork->summary) {
print_summary(kwork);
- print_separator(ret);
+ print_separator(ret, "", 0);
}
print_bad_events(kwork);
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 0f031eb80b4c..1b91834e11de 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -257,6 +257,12 @@ static int rm_rf_kcore_dir(const char *path)
return 0;
}
+void print_separator(int pre_dash_cnt, const char *s, int post_dash_cnt)
+{
+ printf("%.*s%s%.*s\n", pre_dash_cnt, graph_dotted_line, s, post_dash_cnt,
+ graph_dotted_line);
+}
+
int rm_rf_perf_data(const char *path)
{
const char *pat[] = {
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 3423778e39a5..de69384380c2 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -48,6 +48,8 @@ bool sysctl__nmi_watchdog_enabled(void);
int perf_tip(char **strp, const char *dirpath);
+void print_separator(int pre_dash_cnt, const char *s, int post_dash_cnt);
+
#ifndef HAVE_SCHED_GETCPU_SUPPORT
int sched_getcpu(void);
#endif
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread

* [PATCH RESEND v4 02/11] tools/lib: Add list_is_first()
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 01/11] perf: Add print_separator to util Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info Swapnil Sapkal
` (8 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
Add list_is_first() to check whether @list is the first entry in list @head
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/include/linux/list.h | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/tools/include/linux/list.h b/tools/include/linux/list.h
index a4dfb6a7cc6a..a692ff7aed5c 100644
--- a/tools/include/linux/list.h
+++ b/tools/include/linux/list.h
@@ -169,6 +169,16 @@ static inline void list_move_tail(struct list_head *list,
list_add_tail(list, head);
}
+/**
+ * list_is_first -- tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
+{
+ return list->prev == head;
+}
+
/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread

* [PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 01/11] perf: Add print_separator to util Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 02/11] tools/lib: Add list_is_first() Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-17 6:20 ` kernel test robot
2025-09-09 11:42 ` [PATCH RESEND v4 04/11] perf sched stats: Add record and rawdump support Swapnil Sapkal
` (7 subsequent siblings)
10 siblings, 1 reply; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
'/proc/schedstat' gives the info about load balancing statistics within
a given domain. It also contains the cpu_mask giving information about
the sibling cpus and domain names after schedstat version 17. Storing
this information in perf header will help tools like `perf sched stats`
for better analysis.
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
.../Documentation/perf.data-file-format.txt | 17 +
tools/perf/builtin-inject.c | 1 +
tools/perf/util/env.h | 16 +
tools/perf/util/header.c | 304 ++++++++++++++++++
tools/perf/util/header.h | 1 +
tools/perf/util/util.c | 42 +++
tools/perf/util/util.h | 3 +
7 files changed, 384 insertions(+)
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index cd95ba09f727..92dbba1003cf 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -437,6 +437,23 @@ struct {
} [nr_pmu];
};
+ HEADER_CPU_DOMAIN_INFO = 32,
+
+List of cpu-domain relation info. The format of the data is as below.
+
+struct domain_info {
+ int domain;
+ char dname[];
+ char cpumask[];
+ char cpulist[];
+};
+
+struct cpu_domain_info {
+ int cpu;
+ int nr_domains;
+ struct domain_info domains[];
+};
+
other bits are reserved and should ignored for now
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index a114b3fa1bea..f43a7ec44b5f 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -2058,6 +2058,7 @@ static bool keep_feat(int feat)
case HEADER_CLOCK_DATA:
case HEADER_HYBRID_TOPOLOGY:
case HEADER_PMU_CAPS:
+ case HEADER_CPU_DOMAIN_INFO:
return true;
/* Information that can be updated */
case HEADER_BUILD_ID:
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index e00179787a34..71034c4b4488 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -54,6 +54,19 @@ struct pmu_caps {
char *pmu_name;
};
+struct domain_info {
+ u32 domain;
+ char *dname;
+ char *cpumask;
+ char *cpulist;
+};
+
+struct cpu_domain_map {
+ u32 cpu;
+ u32 nr_domains;
+ struct domain_info **domains;
+};
+
typedef const char *(arch_syscalls__strerrno_t)(int err);
struct perf_env {
@@ -70,6 +83,8 @@ struct perf_env {
unsigned int max_branches;
unsigned int br_cntr_nr;
unsigned int br_cntr_width;
+ unsigned int schedstat_version;
+ unsigned int max_sched_domains;
int kernel_is_64_bit;
int nr_cmdline;
@@ -92,6 +107,7 @@ struct perf_env {
char **cpu_pmu_caps;
struct cpu_topology_map *cpu;
struct cpu_cache_level *caches;
+ struct cpu_domain_map **cpu_domain;
int caches_cnt;
u32 comp_ratio;
u32 comp_ver;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4f2a6e10ed5c..7ff7434bac2c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1621,6 +1621,184 @@ static int write_pmu_caps(struct feat_fd *ff,
return 0;
}
+static void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr)
+{
+ for (u32 i = 0; i < nr; i++) {
+ if (cd_map[i]->domains) {
+ for (u32 j = 0; j < cd_map[i]->nr_domains; j++) {
+ struct domain_info *d_info = cd_map[i]->domains[j];
+
+ if (schedstat_version >= 17)
+ free(d_info->dname);
+
+ free(d_info->cpumask);
+ free(d_info->cpulist);
+ }
+ free(cd_map[i]->domains);
+ }
+ }
+
+ free(cd_map);
+}
+
+static struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
+ u32 nr)
+{
+ struct domain_info *domain_info;
+ struct cpu_domain_map **cd_map;
+ char dname[16], cpumask[256];
+ char cpulist[1024];
+ char *line = NULL;
+ u32 cpu, domain;
+ u32 dcount = 0;
+ size_t len;
+ FILE *fp;
+
+ fp = fopen("/proc/schedstat", "r");
+ if (!fp) {
+ pr_err("Failed to open /proc/schedstat\n");
+ return NULL;
+ }
+
+ cd_map = calloc(nr, sizeof(*cd_map));
+ if (!cd_map)
+ goto out;
+
+ while (getline(&line, &len, fp) > 0) {
+ int retval;
+
+ if (strncmp(line, "version", 7) == 0) {
+ retval = sscanf(line, "version %d\n", schedstat_version);
+ if (retval != 1)
+ continue;
+
+ } else if (strncmp(line, "cpu", 3) == 0) {
+ retval = sscanf(line, "cpu%u %*s", &cpu);
+ if (retval == 1) {
+ cd_map[cpu] = calloc(1, sizeof(*cd_map[cpu]));
+ if (!cd_map[cpu])
+ goto out_free_line;
+ cd_map[cpu]->cpu = cpu;
+ } else
+ continue;
+
+ dcount = 0;
+ } else if (strncmp(line, "domain", 6) == 0) {
+ dcount++;
+
+ cd_map[cpu]->domains = realloc(cd_map[cpu]->domains,
+ dcount * sizeof(domain_info));
+ if (!cd_map[cpu]->domains)
+ goto out_free_line;
+
+ domain_info = calloc(1, sizeof(*domain_info));
+ if (!domain_info)
+ goto out_free_line;
+
+ cd_map[cpu]->domains[dcount - 1] = domain_info;
+
+ if (*schedstat_version >= 17) {
+ retval = sscanf(line, "domain%u %s %s %*s", &domain, dname,
+ cpumask);
+ if (retval != 3)
+ continue;
+
+ domain_info->dname = calloc(strlen(dname) + 1, sizeof(char));
+ if (!domain_info->dname)
+ goto out_free_line;
+
+ strcpy(domain_info->dname, dname);
+ } else {
+ retval = sscanf(line, "domain%u %s %*s", &domain, cpumask);
+ if (retval != 2)
+ continue;
+ }
+
+ domain_info->domain = domain;
+ if (domain > *max_sched_domains)
+ *max_sched_domains = domain;
+
+ domain_info->cpumask = calloc(strlen(cpumask) + 1, sizeof(char));
+ if (!domain_info->cpumask)
+ goto out_free_line;
+
+ strcpy(domain_info->cpumask, cpumask);
+
+ cpumask_to_cpulist(cpumask, cpulist);
+ domain_info->cpulist = calloc(strlen(cpulist) + 1, sizeof(char));
+ if (!domain_info->cpulist)
+ goto out_free_line;
+
+ strcpy(domain_info->cpulist, cpulist);
+ cd_map[cpu]->nr_domains = dcount;
+ }
+ }
+
+out_free_line:
+ free(line);
+out:
+ fclose(fp);
+ return cd_map;
+}
+
+static int write_cpu_domain_info(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ u32 max_sched_domains = 0, schedstat_version = 0;
+ struct cpu_domain_map **cd_map;
+ u32 i, j, nr, ret;
+
+ nr = cpu__max_present_cpu().cpu;
+
+ cd_map = build_cpu_domain_map(&schedstat_version, &max_sched_domains, nr);
+ if (!cd_map)
+ return -1;
+
+ ret = do_write(ff, &schedstat_version, sizeof(u32));
+ if (ret < 0)
+ goto out;
+
+ max_sched_domains += 1;
+ ret = do_write(ff, &max_sched_domains, sizeof(u32));
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < nr; i++) {
+ if (cd_map[i]->domains) {
+ ret = do_write(ff, &cd_map[i]->cpu, sizeof(u32));
+ if (ret < 0)
+ goto out;
+
+ ret = do_write(ff, &cd_map[i]->nr_domains, sizeof(u32));
+ if (ret < 0)
+ goto out;
+
+ for (j = 0; j < cd_map[i]->nr_domains; j++) {
+ ret = do_write(ff, &cd_map[i]->domains[j]->domain, sizeof(u32));
+ if (ret < 0)
+ goto out;
+ if (schedstat_version >= 17) {
+ ret = do_write_string(ff, cd_map[i]->domains[j]->dname);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = do_write_string(ff, cd_map[i]->domains[j]->cpumask);
+ if (ret < 0)
+ goto out;
+
+ ret = do_write_string(ff, cd_map[i]->domains[j]->cpulist);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ }
+
+out:
+ free_cpu_domain_info(cd_map, schedstat_version, nr);
+ return ret;
+}
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -2254,6 +2432,35 @@ static void print_mem_topology(struct feat_fd *ff, FILE *fp)
}
}
+static void print_cpu_domain_info(struct feat_fd *ff, FILE *fp)
+{
+ struct cpu_domain_map **cd_map = ff->ph->env.cpu_domain;
+ u32 nr = ff->ph->env.nr_cpus_avail;
+ struct domain_info *d_info;
+ u32 i, j;
+
+ fprintf(fp, "# schedstat version : %u\n", ff->ph->env.schedstat_version);
+ fprintf(fp, "# Maximum sched domains : %u\n", ff->ph->env.max_sched_domains);
+
+ for (i = 0; i < nr; i++) {
+ if (cd_map[i]->domains) {
+ fprintf(fp, "# cpu : %u\n", cd_map[i]->cpu);
+ fprintf(fp, "# nr_domains : %u\n", cd_map[i]->nr_domains);
+
+ for (j = 0; j < cd_map[i]->nr_domains; j++) {
+ d_info = cd_map[i]->domains[j];
+ fprintf(fp, "# Domain : %u\n", d_info->domain);
+
+ if (ff->ph->env.schedstat_version >= 17)
+ fprintf(fp, "# Domain name : %s\n", d_info->dname);
+
+ fprintf(fp, "# Domain cpu map : %s\n", d_info->cpumask);
+ fprintf(fp, "# Domain cpu list : %s\n", d_info->cpulist);
+ }
+ }
+ }
+}
+
static int __event_process_build_id(struct perf_record_header_build_id *bev,
char *filename,
struct perf_session *session)
@@ -3395,6 +3602,102 @@ static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
return ret;
}
+static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+ u32 schedstat_version, max_sched_domains, cpu, domain, nr_domains;
+ struct perf_env *env = &ff->ph->env;
+ char *dname, *cpumask, *cpulist;
+ struct cpu_domain_map **cd_map;
+ struct domain_info *d_info;
+ u32 nra, nr, i, j;
+ int ret;
+
+ nra = env->nr_cpus_avail;
+ nr = env->nr_cpus_online;
+
+ cd_map = calloc(nra, sizeof(*cd_map));
+ if (!cd_map)
+ return -1;
+
+ env->cpu_domain = cd_map;
+
+ ret = do_read_u32(ff, &schedstat_version);
+ if (ret)
+ return ret;
+
+ env->schedstat_version = schedstat_version;
+
+ ret = do_read_u32(ff, &max_sched_domains);
+ if (ret)
+ return ret;
+
+ env->max_sched_domains = max_sched_domains;
+
+ for (i = 0; i < nr; i++) {
+ if (do_read_u32(ff, &cpu))
+ return -1;
+
+ cd_map[cpu] = calloc(1, sizeof(*cd_map[cpu]));
+ if (!cd_map[cpu])
+ return -1;
+
+ cd_map[cpu]->cpu = cpu;
+
+ if (do_read_u32(ff, &nr_domains))
+ return -1;
+
+ cd_map[cpu]->nr_domains = nr_domains;
+
+ cd_map[cpu]->domains = calloc(max_sched_domains, sizeof(*d_info));
+ if (!cd_map[cpu]->domains)
+ return -1;
+
+ for (j = 0; j < nr_domains; j++) {
+ if (do_read_u32(ff, &domain))
+ return -1;
+
+ d_info = calloc(1, sizeof(*d_info));
+ if (!d_info)
+ return -1;
+
+ cd_map[cpu]->domains[domain] = d_info;
+ d_info->domain = domain;
+
+ if (schedstat_version >= 17) {
+ dname = do_read_string(ff);
+ if (!dname)
+ return -1;
+
+ d_info->dname = calloc(strlen(dname) + 1, sizeof(char));
+ if (!d_info->dname)
+ return -1;
+
+ strcpy(d_info->dname, dname);
+ }
+
+ cpumask = do_read_string(ff);
+ if (!cpumask)
+ return -1;
+
+ d_info->cpumask = calloc(strlen(cpumask) + 1, sizeof(char));
+ if (!d_info->cpumask)
+ return -1;
+ strcpy(d_info->cpumask, cpumask);
+
+ cpulist = do_read_string(ff);
+ if (!cpulist)
+ return -1;
+
+ d_info->cpulist = calloc(strlen(cpulist) + 1, sizeof(char));
+ if (!d_info->cpulist)
+ return -1;
+ strcpy(d_info->cpulist, cpulist);
+ }
+ }
+
+ return ret;
+}
+
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
@@ -3460,6 +3763,7 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(CLOCK_DATA, clock_data, false),
FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
FEAT_OPR(PMU_CAPS, pmu_caps, false),
+ FEAT_OPR(CPU_DOMAIN_INFO, cpu_domain_info, true),
};
struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d16dfceccd74..edcb95e0dc49 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -53,6 +53,7 @@ enum {
HEADER_CLOCK_DATA,
HEADER_HYBRID_TOPOLOGY,
HEADER_PMU_CAPS,
+ HEADER_CPU_DOMAIN_INFO,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 1b91834e11de..47bfc0259b0e 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -263,6 +263,48 @@ void print_separator(int pre_dash_cnt, const char *s, int post_dash_cnt)
graph_dotted_line);
}
+void cpumask_to_cpulist(char *cpumask, char *cpulist)
+{
+ int i, j, bm_size, nbits;
+ int len = strlen(cpumask);
+ unsigned long *bm;
+ char cpus[1024];
+
+ for (i = 0; i < len; i++) {
+ if (cpumask[i] == ',') {
+ for (j = i; j < len; j++)
+ cpumask[j] = cpumask[j + 1];
+ }
+ }
+
+ len = strlen(cpumask);
+ bm_size = (len + 15) / 16;
+ nbits = bm_size * 64;
+ if (nbits <= 0)
+ return;
+
+ bm = calloc(bm_size, sizeof(unsigned long));
+ if (!cpumask)
+ goto free_bm;
+
+ for (i = 0; i < bm_size; i++) {
+ char blk[17];
+ int blklen = len > 16 ? 16 : len;
+
+ strncpy(blk, cpumask + len - blklen, blklen);
+ blk[len] = '\0';
+ bm[i] = strtoul(blk, NULL, 16);
+ cpumask[len - blklen] = '\0';
+ len = strlen(cpumask);
+ }
+
+ bitmap_scnprintf(bm, nbits, cpus, sizeof(cpus));
+ strcpy(cpulist, cpus);
+
+free_bm:
+ free(bm);
+}
+
int rm_rf_perf_data(const char *path)
{
const char *pat[] = {
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index de69384380c2..90a8b4d2e59c 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -11,6 +11,7 @@
#include <stdbool.h>
#include <stddef.h>
#include <linux/compiler.h>
+#include <linux/bitmap.h>
#include <sys/types.h>
#ifndef __cplusplus
#include <internal/cpumap.h>
@@ -50,6 +51,8 @@ int perf_tip(char **strp, const char *dirpath);
void print_separator(int pre_dash_cnt, const char *s, int post_dash_cnt);
+void cpumask_to_cpulist(char *cpumask, char *cpulist);
+
#ifndef HAVE_SCHED_GETCPU_SUPPORT
int sched_getcpu(void);
#endif
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread

* Re: [PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info
2025-09-09 11:42 ` [PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info Swapnil Sapkal
@ 2025-09-17 6:20 ` kernel test robot
0 siblings, 0 replies; 13+ messages in thread
From: kernel test robot @ 2025-09-17 6:20 UTC (permalink / raw)
To: Swapnil Sapkal
Cc: oe-lkp, lkp, linux-perf-users, linux-kernel, peterz, mingo, acme,
namhyung, irogers, james.clark, ravi.bangoria, swapnil.sapkal,
yu.c.chen, mark.rutland, alexander.shishkin, jolsa, rostedt,
vincent.guittot, adrian.hunter, kan.liang, gautham.shenoy,
kprateek.nayak, juri.lelli, yangjihong, void, tj, sshegde, ctshao,
quic_zhonhan, thomas.falcon, blakejones, ashelat, leo.yan,
dvyukov, ak, yujie.liu, graham.woodward, ben.gainey, vineethr,
tim.c.chen, linux, santosh.shukla, sandipan.das, oliver.sang
Hello,
kernel test robot noticed "perf-sanity-tests.perf_pipe_recording_and_injection_test.fail" on:
commit: 01c79e2544b044e2c01ab435a28a03c3f0d63be3 ("[PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info")
url: https://github.com/intel-lab-lkp/linux/commits/Swapnil-Sapkal/perf-Add-print_separator-to-util/20250909-195256
base: https://git.kernel.org/cgit/linux/kernel/git/perf/perf-tools-next.git perf-tools-next
patch link: https://lore.kernel.org/all/20250909114227.58802-4-swapnil.sapkal@amd.com/
patch subject: [PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info
in testcase: perf-sanity-tests
version:
with following parameters:
perf_compiler: gcc
group: group-02
config: x86_64-rhel-9.4-bpf
compiler: gcc-14
test machine: 256 threads 2 sockets GENUINE INTEL(R) XEON(R) (Sierra Forest) with 128G memory
(please refer to attached dmesg/kmsg for entire log/backtrace)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <oliver.sang@intel.com>
| Closes: https://lore.kernel.org/oe-lkp/202509171303.b892ad4b-lkp@intel.com
we also observed failure of perf-sanity-tests.Zstd_perf.data_compression/decompression
which can pass on parent.
138d89b6ba9cd79a 01c79e2544b044e2c01ab435a28
---------------- ---------------------------
fail:runs %reproduction fail:runs
| | |
:6 100% 6:6 perf-sanity-tests.Zstd_perf.data_compression/decompression.fail
:6 100% 6:6 perf-sanity-tests.perf_pipe_recording_and_injection_test.fail
2025-09-15 14:22:07 sudo /usr/src/linux-perf-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf test 88 -v
88: perf pipe recording and injection test : Running (1 active)
--- start ---
test child forked, pid 16787
140175e-1401871 l noploop
perf does have symbol 'noploop'
Record+report pipe test
util/util.c:295:6: runtime error: index 64 out of bounds for type 'char [17]'
util/util.c:295:12: runtime error: store to address 0x7f4b418a1060 with insufficient space for an object of type 'char'
0x7f4b418a1060: note: pointer points here
00 00 00 00 01 00 00 00 00 00 00 00 18 00 04 2e 00 00 00 00 9d 77 00 00 00 00 00 00 02 00 00 00
^
=================================================================
==16803==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f4b418a1050 at pc 0x561dc13e9257 bp 0x7ffe75d35540 sp 0x7ffe75d35538
WRITE of size 1 at 0x7f4b418a1050 thread T0
#0 0x561dc13e9256 in cpumask_to_cpulist util/util.c:295
#1 0x561dc11fb37b in build_cpu_domain_map util/header.c:1727
#2 0x561dc11fb91f in write_cpu_domain_info util/header.c:1753
#3 0x561dc145a9ef in perf_event__synthesize_features util/synthetic-events.c:2419
#4 0x561dc145b224 in perf_event__synthesize_for_pipe util/synthetic-events.c:2471
#5 0x561dc0bc2085 in record__synthesize /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2063
#6 0x561dc0bc9ded in __cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2581
#7 0x561dc0bda8cb in cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:4376
#8 0x561dc0de7aac in run_builtin /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:349
#9 0x561dc0de839d in handle_internal_command /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:401
#10 0x561dc0de88f3 in run_argv /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:445
#11 0x561dc0de909a in main /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:553
#12 0x7f4b4efc7ca7 (/lib/x86_64-linux-gnu/libc.so.6+0x29ca7) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#13 0x7f4b4efc7d64 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29d64) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#14 0x561dc0b58470 in _start (/usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf+0xf9b470) (BuildId: 7c6fd1162dbb1e721de92bbfd9fc751e2178f1fa)
Address 0x7f4b418a1050 is located in stack of thread T0 at offset 80 in frame
#0 0x561dc13e8c26 in cpumask_to_cpulist util/util.c:267
This frame has 2 object(s):
[32, 49) 'blk' (line 291)
[96, 1120) 'cpus' (line 271) <== Memory access at offset 80 underflows this variable
HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork
(longjmp and C++ exceptions *are* supported)
SUMMARY: AddressSanitizer: stack-buffer-overflow util/util.c:295 in cpumask_to_cpulist
Shadow bytes around the buggy address:
0x7f4b418a0d80: 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 f3
0x7f4b418a0e00: f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 00 00 00 00
0x7f4b418a0e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f4b418a0f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f4b418a0f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
=>0x7f4b418a1000: f1 f1 f1 f1 00 00 01 f2 f2 f2[f2]f2 00 00 00 00
0x7f4b418a1080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f4b418a1100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f4b418a1180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f4b418a1200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f4b418a1280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==16803==ABORTING
Record+report pipe test [Failed - cannot find the test file in the perf report #1]
Inject -B build-ids test
util/util.c:295:6: runtime error: index 64 out of bounds for type 'char [17]'
util/util.c:295:12: runtime error: store to address 0x7faa03ca1060 with insufficient space for an object of type 'char'
0x7faa03ca1060: note: pointer points here
00 00 00 00 01 00 00 00 00 00 00 00 18 00 04 2e 00 00 00 00 9d 77 00 00 00 00 00 00 02 00 00 00
^
=================================================================
==16818==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7faa03ca1050 at pc 0x5613ca51a257 bp 0x7ffde4af7240 sp 0x7ffde4af7238
WRITE of size 1 at 0x7faa03ca1050 thread T0
#0 0x5613ca51a256 in cpumask_to_cpulist util/util.c:295
#1 0x5613ca32c37b in build_cpu_domain_map util/header.c:1727
#2 0x5613ca32c91f in write_cpu_domain_info util/header.c:1753
#3 0x5613ca58b9ef in perf_event__synthesize_features util/synthetic-events.c:2419
#4 0x5613ca58c224 in perf_event__synthesize_for_pipe util/synthetic-events.c:2471
#5 0x5613c9cf3085 in record__synthesize /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2063
#6 0x5613c9cfaded in __cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2581
#7 0x5613c9d0b8cb in cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:4376
#8 0x5613c9f18aac in run_builtin /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:349
#9 0x5613c9f1939d in handle_internal_command /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:401
#10 0x5613c9f198f3 in run_argv /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:445
#11 0x5613c9f1a09a in main /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:553
#12 0x7faa11387ca7 (/lib/x86_64-linux-gnu/libc.so.6+0x29ca7) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#13 0x7faa11387d64 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29d64) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#14 0x5613c9c89470 in _start (/usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf+0xf9b470) (BuildId: 7c6fd1162dbb1e721de92bbfd9fc751e2178f1fa)
Address 0x7faa03ca1050 is located in stack of thread T0 at offset 80 in frame
#0 0x5613ca519c26 in cpumask_to_cpulist util/util.c:267
This frame has 2 object(s):
[32, 49) 'blk' (line 291)
[96, 1120) 'cpus' (line 271) <== Memory access at offset 80 underflows this variable
HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork
(longjmp and C++ exceptions *are* supported)
SUMMARY: AddressSanitizer: stack-buffer-overflow util/util.c:295 in cpumask_to_cpulist
Shadow bytes around the buggy address:
0x7faa03ca0d80: 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 f3
0x7faa03ca0e00: f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 00 00 00 00
0x7faa03ca0e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7faa03ca0f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7faa03ca0f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
=>0x7faa03ca1000: f1 f1 f1 f1 00 00 01 f2 f2 f2[f2]f2 00 00 00 00
0x7faa03ca1080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7faa03ca1100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7faa03ca1180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7faa03ca1200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7faa03ca1280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==16818==ABORTING
Error:
The - data has no samples!
Inject build-ids test [Failed - cannot find noploop function in pipe #1]
Inject -b build-ids test
util/util.c:295:6: runtime error: index 64 out of bounds for type 'char [17]'
util/util.c:295:12: runtime error: store to address 0x7fad986a1060 with insufficient space for an object of type 'char'
0x7fad986a1060: note: pointer points here
00 00 00 00 01 00 00 00 00 00 00 00 18 00 04 2e 00 00 00 00 9d 77 00 00 00 00 00 00 02 00 00 00
^
=================================================================
==16835==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7fad986a1050 at pc 0x563c459e2257 bp 0x7ffe9db4ac80 sp 0x7ffe9db4ac78
WRITE of size 1 at 0x7fad986a1050 thread T0
#0 0x563c459e2256 in cpumask_to_cpulist util/util.c:295
#1 0x563c457f437b in build_cpu_domain_map util/header.c:1727
#2 0x563c457f491f in write_cpu_domain_info util/header.c:1753
#3 0x563c45a539ef in perf_event__synthesize_features util/synthetic-events.c:2419
#4 0x563c45a54224 in perf_event__synthesize_for_pipe util/synthetic-events.c:2471
#5 0x563c451bb085 in record__synthesize /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2063
#6 0x563c451c2ded in __cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2581
#7 0x563c451d38cb in cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:4376
#8 0x563c453e0aac in run_builtin /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:349
#9 0x563c453e139d in handle_internal_command /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:401
#10 0x563c453e18f3 in run_argv /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:445
#11 0x563c453e209a in main /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:553
#12 0x7fada5de9ca7 (/lib/x86_64-linux-gnu/libc.so.6+0x29ca7) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#13 0x7fada5de9d64 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29d64) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#14 0x563c45151470 in _start (/usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf+0xf9b470) (BuildId: 7c6fd1162dbb1e721de92bbfd9fc751e2178f1fa)
Address 0x7fad986a1050 is located in stack of thread T0 at offset 80 in frame
#0 0x563c459e1c26 in cpumask_to_cpulist util/util.c:267
This frame has 2 object(s):
[32, 49) 'blk' (line 291)
[96, 1120) 'cpus' (line 271) <== Memory access at offset 80 underflows this variable
HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork
(longjmp and C++ exceptions *are* supported)
SUMMARY: AddressSanitizer: stack-buffer-overflow util/util.c:295 in cpumask_to_cpulist
Shadow bytes around the buggy address:
0x7fad986a0d80: 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 f3
0x7fad986a0e00: f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 00 00 00 00
0x7fad986a0e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fad986a0f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fad986a0f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
=>0x7fad986a1000: f1 f1 f1 f1 00 00 01 f2 f2 f2[f2]f2 00 00 00 00
0x7fad986a1080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fad986a1100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fad986a1180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fad986a1200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fad986a1280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==16835==ABORTING
Error:
The - data has no samples!
Inject build-ids test [Failed - cannot find noploop function in pipe #1]
Inject --buildid-all build-ids test
util/util.c:295:6: runtime error: index 64 out of bounds for type 'char [17]'
util/util.c:295:12: runtime error: store to address 0x7fb708ca1060 with insufficient space for an object of type 'char'
0x7fb708ca1060: note: pointer points here
00 00 00 00 01 00 00 00 00 00 00 00 18 00 04 2e 00 00 00 00 9d 77 00 00 00 00 00 00 02 00 00 00
^
=================================================================
==16849==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7fb708ca1050 at pc 0x55677eb40257 bp 0x7ffe5b049bd0 sp 0x7ffe5b049bc8
WRITE of size 1 at 0x7fb708ca1050 thread T0
#0 0x55677eb40256 in cpumask_to_cpulist util/util.c:295
#1 0x55677e95237b in build_cpu_domain_map util/header.c:1727
#2 0x55677e95291f in write_cpu_domain_info util/header.c:1753
#3 0x55677ebb19ef in perf_event__synthesize_features util/synthetic-events.c:2419
#4 0x55677ebb2224 in perf_event__synthesize_for_pipe util/synthetic-events.c:2471
#5 0x55677e319085 in record__synthesize /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2063
#6 0x55677e320ded in __cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2581
#7 0x55677e3318cb in cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:4376
#8 0x55677e53eaac in run_builtin /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:349
#9 0x55677e53f39d in handle_internal_command /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:401
#10 0x55677e53f8f3 in run_argv /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:445
#11 0x55677e54009a in main /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:553
#12 0x7fb716412ca7 (/lib/x86_64-linux-gnu/libc.so.6+0x29ca7) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#13 0x7fb716412d64 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29d64) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#14 0x55677e2af470 in _start (/usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf+0xf9b470) (BuildId: 7c6fd1162dbb1e721de92bbfd9fc751e2178f1fa)
Address 0x7fb708ca1050 is located in stack of thread T0 at offset 80 in frame
#0 0x55677eb3fc26 in cpumask_to_cpulist util/util.c:267
This frame has 2 object(s):
[32, 49) 'blk' (line 291)
[96, 1120) 'cpus' (line 271) <== Memory access at offset 80 underflows this variable
HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork
(longjmp and C++ exceptions *are* supported)
SUMMARY: AddressSanitizer: stack-buffer-overflow util/util.c:295 in cpumask_to_cpulist
Shadow bytes around the buggy address:
0x7fb708ca0d80: 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 f3
0x7fb708ca0e00: f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 00 00 00 00
0x7fb708ca0e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fb708ca0f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fb708ca0f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
=>0x7fb708ca1000: f1 f1 f1 f1 00 00 01 f2 f2 f2[f2]f2 00 00 00 00
0x7fb708ca1080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fb708ca1100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fb708ca1180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fb708ca1200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7fb708ca1280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==16849==ABORTING
Error:
The - data has no samples!
Inject build-ids test [Failed - cannot find noploop function in pipe #1]
Inject --mmap2-buildid-all build-ids test
util/util.c:295:6: runtime error: index 64 out of bounds for type 'char [17]'
util/util.c:295:12: runtime error: store to address 0x7f790f8a1060 with insufficient space for an object of type 'char'
0x7f790f8a1060: note: pointer points here
00 00 00 00 01 00 00 00 00 00 00 00 18 00 04 2e 00 00 00 00 9d 77 00 00 00 00 00 00 02 00 00 00
^
=================================================================
==16869==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f790f8a1050 at pc 0x5602d1c61257 bp 0x7ffc6a01f190 sp 0x7ffc6a01f188
WRITE of size 1 at 0x7f790f8a1050 thread T0
#0 0x5602d1c61256 in cpumask_to_cpulist util/util.c:295
#1 0x5602d1a7337b in build_cpu_domain_map util/header.c:1727
#2 0x5602d1a7391f in write_cpu_domain_info util/header.c:1753
#3 0x5602d1cd29ef in perf_event__synthesize_features util/synthetic-events.c:2419
#4 0x5602d1cd3224 in perf_event__synthesize_for_pipe util/synthetic-events.c:2471
#5 0x5602d143a085 in record__synthesize /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2063
#6 0x5602d1441ded in __cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2581
#7 0x5602d14528cb in cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:4376
#8 0x5602d165faac in run_builtin /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:349
#9 0x5602d166039d in handle_internal_command /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:401
#10 0x5602d16608f3 in run_argv /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:445
#11 0x5602d166109a in main /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:553
#12 0x7f791cfd5ca7 (/lib/x86_64-linux-gnu/libc.so.6+0x29ca7) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#13 0x7f791cfd5d64 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29d64) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#14 0x5602d13d0470 in _start (/usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf+0xf9b470) (BuildId: 7c6fd1162dbb1e721de92bbfd9fc751e2178f1fa)
Address 0x7f790f8a1050 is located in stack of thread T0 at offset 80 in frame
#0 0x5602d1c60c26 in cpumask_to_cpulist util/util.c:267
This frame has 2 object(s):
[32, 49) 'blk' (line 291)
[96, 1120) 'cpus' (line 271) <== Memory access at offset 80 underflows this variable
HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork
(longjmp and C++ exceptions *are* supported)
SUMMARY: AddressSanitizer: stack-buffer-overflow util/util.c:295 in cpumask_to_cpulist
Shadow bytes around the buggy address:
0x7f790f8a0d80: 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 f3
0x7f790f8a0e00: f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 00 00 00 00
0x7f790f8a0e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f790f8a0f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f790f8a0f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
=>0x7f790f8a1000: f1 f1 f1 f1 00 00 01 f2 f2 f2[f2]f2 00 00 00 00
0x7f790f8a1080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f790f8a1100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f790f8a1180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f790f8a1200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f790f8a1280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==16869==ABORTING
Error:
The - data has no samples!
Inject build-ids test [Failed - cannot find noploop function in pipe #1]
---- end(-1) ----
88: perf pipe recording and injection test : FAILED!
2025-09-15 14:22:22 sudo /usr/src/linux-perf-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf test 89 -v
89: Zstd perf.data compression/decompression : Running (1 active)
--- start ---
test child forked, pid 16896
Collecting compressed record file:
500+0 records in
500+0 records out
256000 bytes (256 kB, 250 KiB) copied, 0.00308355 s, 83.0 MB/s
[ perf record: Woken up 2 times to write data ]
util/util.c:295:6: runtime error: index 64 out of bounds for type 'char [17]'
util/util.c:295:12: runtime error: store to address 0x7f9ffdca2060 with insufficient space for an object of type 'char'
0x7f9ffdca2060: note: pointer points here
00 00 00 00 01 00 00 00 00 00 00 00 18 00 04 2e 00 00 00 00 9d 77 00 00 00 00 00 00 02 00 00 00
^
=================================================================
==16904==ERROR: AddressSanitizer: stack-buffer-overflow on address 0x7f9ffdca2050 at pc 0x564af4570257 bp 0x7ffd6f117510 sp 0x7ffd6f117508
WRITE of size 1 at 0x7f9ffdca2050 thread T0
#0 0x564af4570256 in cpumask_to_cpulist util/util.c:295
#1 0x564af438237b in build_cpu_domain_map util/header.c:1727
#2 0x564af438291f in write_cpu_domain_info util/header.c:1753
#3 0x564af439cc44 in do_write_feat util/header.c:3892
#4 0x564af439d65f in perf_header__adds_write util/header.c:3936
#5 0x564af439f14a in perf_session__do_write_header util/header.c:4060
#6 0x564af439f72c in perf_session__write_header util/header.c:4100
#7 0x564af3d46044 in record__finish_output /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:1826
#8 0x564af3d53e54 in __cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:2894
#9 0x564af3d618cb in cmd_record /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/builtin-record.c:4376
#10 0x564af3f6eaac in run_builtin /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:349
#11 0x564af3f6f39d in handle_internal_command /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:401
#12 0x564af3f6f8f3 in run_argv /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:445
#13 0x564af3f7009a in main /usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf.c:553
#14 0x7fa00b28eca7 (/lib/x86_64-linux-gnu/libc.so.6+0x29ca7) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#15 0x7fa00b28ed64 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x29d64) (BuildId: def5460e3cee00bfee25b429c97bcc4853e5b3a8)
#16 0x564af3cdf470 in _start (/usr/src/perf_selftests-x86_64-rhel-9.4-bpf-01c79e2544b044e2c01ab435a28a03c3f0d63be3/tools/perf/perf+0xf9b470) (BuildId: 7c6fd1162dbb1e721de92bbfd9fc751e2178f1fa)
Address 0x7f9ffdca2050 is located in stack of thread T0 at offset 80 in frame
#0 0x564af456fc26 in cpumask_to_cpulist util/util.c:267
This frame has 2 object(s):
[32, 49) 'blk' (line 291)
[96, 1120) 'cpus' (line 271) <== Memory access at offset 80 underflows this variable
HINT: this may be a false positive if your program uses some custom stack unwind mechanism, swapcontext or vfork
(longjmp and C++ exceptions *are* supported)
SUMMARY: AddressSanitizer: stack-buffer-overflow util/util.c:295 in cpumask_to_cpulist
Shadow bytes around the buggy address:
0x7f9ffdca1d80: 00 00 00 00 00 00 00 00 00 00 00 00 f3 f3 f3 f3
0x7f9ffdca1e00: f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 f3 00 00 00 00
0x7f9ffdca1e80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f9ffdca1f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f9ffdca1f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
=>0x7f9ffdca2000: f1 f1 f1 f1 00 00 01 f2 f2 f2[f2]f2 00 00 00 00
0x7f9ffdca2080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f9ffdca2100: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f9ffdca2180: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f9ffdca2200: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
0x7f9ffdca2280: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Shadow byte legend (one shadow byte represents 8 application bytes):
Addressable: 00
Partially addressable: 01 02 03 04 05 06 07
Heap left redzone: fa
Freed heap region: fd
Stack left redzone: f1
Stack mid redzone: f2
Stack right redzone: f3
Stack after return: f5
Stack use after scope: f8
Global redzone: f9
Global init order: f6
Poisoned by user: f7
Container overflow: fc
Array cookie: ac
Intra object redzone: bb
ASan internal: fe
Left alloca redzone: ca
Right alloca redzone: cb
==16904==ABORTING
---- end(-1) ----
89: Zstd perf.data compression/decompression : FAILED!
The kernel config and materials to reproduce are available at:
https://download.01.org/0day-ci/archive/20250917/202509171303.b892ad4b-lkp@intel.com
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH RESEND v4 04/11] perf sched stats: Add record and rawdump support
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (2 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 03/11] perf header: Support CPU DOMAIN relation info Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 05/11] perf sched stats: Add schedstat v16 support Swapnil Sapkal
` (6 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das, James Clark
Define new, perf-tool-only, sample types and their layouts. Add logic
to parse /proc/schedstat, convert it to the perf sample format and save
the samples to a perf.data file with the `perf sched stats record`
command. Also add logic to read the perf.data file, interpret the
schedstat samples and print a raw dump of the samples with `perf script -D`.
Note that the /proc/schedstat file output is standardized with a version
number. This patch supports v15, but support for older or newer versions
can be added easily.
Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Tested-by: James Clark <james.clark@linaro.org>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/lib/perf/Documentation/libperf.txt | 2 +
tools/lib/perf/Makefile | 1 +
tools/lib/perf/include/perf/event.h | 41 ++++
tools/lib/perf/include/perf/schedstat-v15.h | 146 +++++++++++++
tools/perf/builtin-inject.c | 2 +
tools/perf/builtin-sched.c | 222 +++++++++++++++++++-
tools/perf/util/event.c | 40 ++++
tools/perf/util/event.h | 2 +
tools/perf/util/session.c | 22 ++
tools/perf/util/synthetic-events.c | 179 ++++++++++++++++
tools/perf/util/synthetic-events.h | 3 +
tools/perf/util/tool.c | 18 ++
tools/perf/util/tool.h | 4 +-
13 files changed, 680 insertions(+), 2 deletions(-)
create mode 100644 tools/lib/perf/include/perf/schedstat-v15.h
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index 4072bc9b7670..576ecc5fc312 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -211,6 +211,8 @@ SYNOPSIS
struct perf_record_header_feature;
struct perf_record_compressed;
struct perf_record_compressed2;
+ struct perf_record_schedstat_cpu;
+ struct perf_record_schedstat_domain;
--
DESCRIPTION
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 7fbb50b74c00..9fa28e512ca8 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -179,6 +179,7 @@ install_lib: libs
cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
HDRS := bpf_perf.h core.h cpumap.h threadmap.h evlist.h evsel.h event.h mmap.h
+HDRS += schedstat-v15.h
INTERNAL_HDRS := cpumap.h evlist.h evsel.h lib.h mmap.h rc_check.h threadmap.h xyarray.h
INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/perf
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 6608f1e3701b..7ed7bae73b55 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -483,6 +483,43 @@ struct perf_record_bpf_metadata {
struct perf_record_bpf_metadata_entry entries[];
};
+struct perf_record_schedstat_cpu_v15 {
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) _type _name
+#include "schedstat-v15.h"
+#undef CPU_FIELD
+};
+
+struct perf_record_schedstat_cpu {
+ struct perf_event_header header;
+ __u64 timestamp;
+ __u32 cpu;
+ __u16 version;
+ /* Padding */
+ char __pad[2];
+ union {
+ struct perf_record_schedstat_cpu_v15 v15;
+ };
+};
+
+struct perf_record_schedstat_domain_v15 {
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) _type _name
+#include "schedstat-v15.h"
+#undef DOMAIN_FIELD
+};
+
+#define DOMAIN_NAME_LEN 16
+
+struct perf_record_schedstat_domain {
+ struct perf_event_header header;
+ __u64 timestamp;
+ __u32 cpu;
+ __u16 version;
+ __u16 domain;
+ union {
+ struct perf_record_schedstat_domain_v15 v15;
+ };
+};
+
enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_USER_TYPE_START = 64,
PERF_RECORD_HEADER_ATTR = 64,
@@ -506,6 +543,8 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_FINISHED_INIT = 82,
PERF_RECORD_COMPRESSED2 = 83,
PERF_RECORD_BPF_METADATA = 84,
+ PERF_RECORD_SCHEDSTAT_CPU = 85,
+ PERF_RECORD_SCHEDSTAT_DOMAIN = 86,
PERF_RECORD_HEADER_MAX
};
@@ -548,6 +587,8 @@ union perf_event {
struct perf_record_compressed pack;
struct perf_record_compressed2 pack2;
struct perf_record_bpf_metadata bpf_metadata;
+ struct perf_record_schedstat_cpu schedstat_cpu;
+ struct perf_record_schedstat_domain schedstat_domain;
};
#endif /* __LIBPERF_EVENT_H */
diff --git a/tools/lib/perf/include/perf/schedstat-v15.h b/tools/lib/perf/include/perf/schedstat-v15.h
new file mode 100644
index 000000000000..639458df05f8
--- /dev/null
+++ b/tools/lib/perf/include/perf/schedstat-v15.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CPU_FIELD
+CPU_FIELD(__u32, yld_count, "sched_yield() count",
+ "%11u", false, yld_count, v15);
+CPU_FIELD(__u32, array_exp, "Legacy counter can be ignored",
+ "%11u", false, array_exp, v15);
+CPU_FIELD(__u32, sched_count, "schedule() called",
+ "%11u", false, sched_count, v15);
+CPU_FIELD(__u32, sched_goidle, "schedule() left the processor idle",
+ "%11u", true, sched_count, v15);
+CPU_FIELD(__u32, ttwu_count, "try_to_wake_up() was called",
+ "%11u", false, ttwu_count, v15);
+CPU_FIELD(__u32, ttwu_local, "try_to_wake_up() was called to wake up the local cpu",
+ "%11u", true, ttwu_count, v15);
+CPU_FIELD(__u64, rq_cpu_time, "total runtime by tasks on this processor (in jiffies)",
+ "%11llu", false, rq_cpu_time, v15);
+CPU_FIELD(__u64, run_delay, "total waittime by tasks on this processor (in jiffies)",
+ "%11llu", true, rq_cpu_time, v15);
+CPU_FIELD(__u64, pcount, "total timeslices run on this cpu",
+ "%11llu", false, pcount, v15);
+#endif
+
+#ifdef DOMAIN_FIELD
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category idle> ");
+#endif
+DOMAIN_FIELD(__u32, idle_lb_count,
+ "load_balance() count on cpu idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, idle_lb_balanced,
+ "load_balance() found balanced on cpu idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, idle_lb_failed,
+ "load_balance() move task failed on cpu idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, idle_lb_imbalance,
+ "imbalance sum on cpu idle", "%11u", false, v15);
+DOMAIN_FIELD(__u32, idle_lb_gained,
+ "pull_task() count on cpu idle", "%11u", false, v15);
+DOMAIN_FIELD(__u32, idle_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu idle", "%11u", false, v15);
+DOMAIN_FIELD(__u32, idle_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, idle_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu idle", "%11u", true, v15);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(idle_lb_success_count, "load_balance() success count on cpu idle", "%11u",
+ idle_lb_count, idle_lb_balanced, idle_lb_failed, v15);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(idle_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu idle)", "%11.2Lf",
+ idle_lb_count, idle_lb_balanced, idle_lb_failed, idle_lb_gained, v15);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category busy> ");
+#endif
+DOMAIN_FIELD(__u32, busy_lb_count,
+ "load_balance() count on cpu busy", "%11u", true, v15);
+DOMAIN_FIELD(__u32, busy_lb_balanced,
+ "load_balance() found balanced on cpu busy", "%11u", true, v15);
+DOMAIN_FIELD(__u32, busy_lb_failed,
+ "load_balance() move task failed on cpu busy", "%11u", true, v15);
+DOMAIN_FIELD(__u32, busy_lb_imbalance,
+ "imbalance sum on cpu busy", "%11u", false, v15);
+DOMAIN_FIELD(__u32, busy_lb_gained,
+ "pull_task() count on cpu busy", "%11u", false, v15);
+DOMAIN_FIELD(__u32, busy_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu busy", "%11u", false, v15);
+DOMAIN_FIELD(__u32, busy_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu busy", "%11u", true, v15);
+DOMAIN_FIELD(__u32, busy_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu busy", "%11u", true, v15);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(busy_lb_success_count, "load_balance() success count on cpu busy", "%11u",
+ busy_lb_count, busy_lb_balanced, busy_lb_failed, v15);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(busy_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu busy)", "%11.2Lf",
+ busy_lb_count, busy_lb_balanced, busy_lb_failed, busy_lb_gained, v15);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category newidle> ");
+#endif
+DOMAIN_FIELD(__u32, newidle_lb_count,
+ "load_balance() count on cpu newly idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, newidle_lb_balanced,
+ "load_balance() found balanced on cpu newly idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, newidle_lb_failed,
+ "load_balance() move task failed on cpu newly idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, newidle_lb_imbalance,
+ "imbalance sum on cpu newly idle", "%11u", false, v15);
+DOMAIN_FIELD(__u32, newidle_lb_gained,
+ "pull_task() count on cpu newly idle", "%11u", false, v15);
+DOMAIN_FIELD(__u32, newidle_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu newly idle", "%11u", false, v15);
+DOMAIN_FIELD(__u32, newidle_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu newly idle", "%11u", true, v15);
+DOMAIN_FIELD(__u32, newidle_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu newly idle", "%11u", true, v15);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(newidle_lb_success_count,
+ "load_balance() success count on cpu newly idle", "%11u",
+ newidle_lb_count, newidle_lb_balanced, newidle_lb_failed, v15);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(newidle_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu newly idle)", "%11.2Lf",
+ newidle_lb_count, newidle_lb_balanced, newidle_lb_failed, newidle_lb_gained, v15);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category active_load_balance()> ");
+#endif
+DOMAIN_FIELD(__u32, alb_count,
+ "active_load_balance() count", "%11u", false, v15);
+DOMAIN_FIELD(__u32, alb_failed,
+ "active_load_balance() move task failed", "%11u", false, v15);
+DOMAIN_FIELD(__u32, alb_pushed,
+ "active_load_balance() successfully moved a task", "%11u", false, v15);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category sched_balance_exec()> ");
+#endif
+DOMAIN_FIELD(__u32, sbe_count,
+ "sbe_count is not used", "%11u", false, v15);
+DOMAIN_FIELD(__u32, sbe_balanced,
+ "sbe_balanced is not used", "%11u", false, v15);
+DOMAIN_FIELD(__u32, sbe_pushed,
+ "sbe_pushed is not used", "%11u", false, v15);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category sched_balance_fork()> ");
+#endif
+DOMAIN_FIELD(__u32, sbf_count,
+ "sbf_count is not used", "%11u", false, v15);
+DOMAIN_FIELD(__u32, sbf_balanced,
+ "sbf_balanced is not used", "%11u", false, v15);
+DOMAIN_FIELD(__u32, sbf_pushed,
+ "sbf_pushed is not used", "%11u", false, v15);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Wakeup Info> ");
+#endif
+DOMAIN_FIELD(__u32, ttwu_wake_remote,
+ "try_to_wake_up() awoke a task that last ran on a diff cpu", "%11u", false, v15);
+DOMAIN_FIELD(__u32, ttwu_move_affine,
+ "try_to_wake_up() moved task because cache-cold on own cpu", "%11u", false, v15);
+DOMAIN_FIELD(__u32, ttwu_move_balance,
+ "try_to_wake_up() started passive balancing", "%11u", false, v15);
+#endif /* DOMAIN_FIELD */
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index f43a7ec44b5f..eadf5ecaa8b2 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -2538,6 +2538,8 @@ int cmd_inject(int argc, const char **argv)
inject.tool.compressed = perf_event__repipe_op4_synth;
inject.tool.auxtrace = perf_event__repipe_auxtrace;
inject.tool.bpf_metadata = perf_event__repipe_op2_synth;
+ inject.tool.schedstat_cpu = perf_event__repipe_op2_synth;
+ inject.tool.schedstat_domain = perf_event__repipe_op2_synth;
inject.tool.dont_split_sample_group = true;
inject.session = __perf_session__new(&data, &inject.tool,
/*trace_event_repipe=*/inject.output.is_pipe,
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index f166d6cbc083..2573491fa5f8 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -28,6 +28,8 @@
#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"
+#include "util/synthetic-events.h"
+#include "util/target.h"
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -55,6 +57,7 @@
#define MAX_PRIO 140
static const char *cpu_list;
+static struct perf_cpu_map *user_requested_cpus;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
struct sched_atom;
@@ -236,6 +239,9 @@ struct perf_sched {
volatile bool thread_funcs_exit;
const char *prio_str;
DECLARE_BITMAP(prio_bitmap, MAX_PRIO);
+
+ struct perf_session *session;
+ struct perf_data *data;
};
/* per thread run time data */
@@ -3745,6 +3751,195 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
sort_dimension__add("pid", &sched->cmp_pid);
}
+static int process_synthesized_schedstat_event(const struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+
+ if (perf_data__write(sched->data, event, event->header.size) <= 0) {
+ pr_err("failed to write perf data, error: %m\n");
+ return -1;
+ }
+
+ sched->session->header.data_size += event->header.size;
+ return 0;
+}
+
+static void sighandler(int sig __maybe_unused)
+{
+}
+
+static int enable_sched_schedstats(int *reset)
+{
+ char path[PATH_MAX];
+ FILE *fp;
+ char ch;
+
+ snprintf(path, PATH_MAX, "%s/sys/kernel/sched_schedstats", procfs__mountpoint());
+ fp = fopen(path, "w+");
+ if (!fp) {
+ pr_err("Failed to open %s\n", path);
+ return -1;
+ }
+
+ ch = getc(fp);
+ if (ch == '0') {
+ *reset = 1;
+ rewind(fp);
+ putc('1', fp);
+ fclose(fp);
+ }
+ return 0;
+}
+
+static int disable_sched_schedstat(void)
+{
+ char path[PATH_MAX];
+ FILE *fp;
+
+ snprintf(path, PATH_MAX, "%s/sys/kernel/sched_schedstats", procfs__mountpoint());
+ fp = fopen(path, "w");
+ if (!fp) {
+ pr_err("Failed to open %s\n", path);
+ return -1;
+ }
+
+ putc('0', fp);
+ fclose(fp);
+ return 0;
+}
+
+/* perf.data or any other output file name used by stats subcommand (only). */
+const char *output_name;
+
+static int perf_sched__schedstat_record(struct perf_sched *sched,
+ int argc, const char **argv)
+{
+ struct perf_session *session;
+ struct target target = {};
+ struct evlist *evlist;
+ int reset = 0;
+ int err = 0;
+ int fd;
+ struct perf_data data = {
+ .path = output_name,
+ .mode = PERF_DATA_MODE_WRITE,
+ };
+
+ signal(SIGINT, sighandler);
+ signal(SIGCHLD, sighandler);
+ signal(SIGTERM, sighandler);
+
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ session = perf_session__new(&data, &sched->tool);
+ if (IS_ERR(session)) {
+ pr_err("Perf session creation failed.\n");
+ evlist__delete(evlist);
+ return PTR_ERR(session);
+ }
+
+ session->evlist = evlist;
+
+ sched->session = session;
+ sched->data = &data;
+
+ fd = perf_data__fd(&data);
+
+ /*
+ * Capture all important metadata about the system. Although they are
+ * not used by `perf sched stats` tool directly, they provide useful
+ * information about profiled environment.
+ */
+ perf_header__set_feat(&session->header, HEADER_HOSTNAME);
+ perf_header__set_feat(&session->header, HEADER_OSRELEASE);
+ perf_header__set_feat(&session->header, HEADER_VERSION);
+ perf_header__set_feat(&session->header, HEADER_ARCH);
+ perf_header__set_feat(&session->header, HEADER_NRCPUS);
+ perf_header__set_feat(&session->header, HEADER_CPUDESC);
+ perf_header__set_feat(&session->header, HEADER_CPUID);
+ perf_header__set_feat(&session->header, HEADER_TOTAL_MEM);
+ perf_header__set_feat(&session->header, HEADER_CMDLINE);
+ perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
+ perf_header__set_feat(&session->header, HEADER_NUMA_TOPOLOGY);
+ perf_header__set_feat(&session->header, HEADER_CACHE);
+ perf_header__set_feat(&session->header, HEADER_MEM_TOPOLOGY);
+ perf_header__set_feat(&session->header, HEADER_HYBRID_TOPOLOGY);
+ perf_header__set_feat(&session->header, HEADER_CPU_DOMAIN_INFO);
+
+ err = perf_session__write_header(session, evlist, fd, false);
+ if (err < 0)
+ goto out;
+
+ /*
+ * `perf sched stats` does not support workload profiling (-p pid)
+ * since /proc/schedstat file contains cpu specific data only. Hence, a
+ * profile target is either set of cpus or systemwide, never a process.
+ * Note that, although `-- <workload>` is supported, profile data are
+ * still cpu/systemwide.
+ */
+ if (cpu_list)
+ target.cpu_list = cpu_list;
+ else
+ target.system_wide = true;
+
+ if (argc) {
+ err = evlist__prepare_workload(evlist, &target, argv, false, NULL);
+ if (err)
+ goto out;
+ }
+
+ err = evlist__create_maps(evlist, &target);
+ if (err < 0)
+ goto out;
+
+ user_requested_cpus = evlist->core.user_requested_cpus;
+
+ err = perf_event__synthesize_schedstat(&(sched->tool),
+ process_synthesized_schedstat_event,
+ user_requested_cpus);
+ if (err < 0)
+ goto out;
+
+ err = enable_sched_schedstats(&reset);
+ if (err < 0)
+ goto out;
+
+ if (argc)
+ evlist__start_workload(evlist);
+
+ /* wait for signal */
+ pause();
+
+ if (reset) {
+ err = disable_sched_schedstat();
+ if (err < 0)
+ goto out;
+ }
+
+ err = perf_event__synthesize_schedstat(&(sched->tool),
+ process_synthesized_schedstat_event,
+ user_requested_cpus);
+ if (err < 0)
+ goto out;
+
+ err = perf_session__write_header(session, evlist, fd, true);
+
+out:
+ if (!err)
+ fprintf(stderr, "[ perf sched stats: Wrote samples to %s ]\n", data.path);
+ else
+ fprintf(stderr, "[ perf sched stats: Failed !! ]\n");
+
+ evlist__delete(evlist);
+ close(fd);
+ return err;
+}
+
static bool schedstat_events_exposed(void)
{
/*
@@ -3921,6 +4116,12 @@ int cmd_sched(int argc, const char **argv)
OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"),
OPT_PARENT(sched_options)
};
+ const struct option stats_options[] = {
+ OPT_STRING('o', "output", &output_name, "file",
+ "`stats record` with output filename"),
+ OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_END()
+ };
const char * const latency_usage[] = {
"perf sched latency [<options>]",
@@ -3938,9 +4139,13 @@ int cmd_sched(int argc, const char **argv)
"perf sched timehist [<options>]",
NULL
};
+ const char *stats_usage[] = {
+ "perf sched stats {record} [<options>]",
+ NULL
+ };
const char *const sched_subcommands[] = { "record", "latency", "map",
"replay", "script",
- "timehist", NULL };
+ "timehist", "stats", NULL };
const char *sched_usage[] = {
NULL,
NULL
@@ -4038,6 +4243,21 @@ int cmd_sched(int argc, const char **argv)
ret = symbol__validate_sym_arguments();
if (!ret)
ret = perf_sched__timehist(&sched);
+ } else if (!strcmp(argv[0], "stats")) {
+ const char *const stats_subcommands[] = {"record", NULL};
+
+ argc = parse_options_subcommand(argc, argv, stats_options,
+ stats_subcommands,
+ stats_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (argv[0] && !strcmp(argv[0], "record")) {
+ if (argc)
+ argc = parse_options(argc, argv, stats_options,
+ stats_usage, 0);
+ return perf_sched__schedstat_record(&sched, argc, argv);
+ }
+ usage_with_options(stats_usage, stats_options);
} else {
usage_with_options(sched_usage, sched_options);
}
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index fcf44149feb2..41fb942ef701 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -82,6 +82,8 @@ static const char *perf_event__names[] = {
[PERF_RECORD_FINISHED_INIT] = "FINISHED_INIT",
[PERF_RECORD_COMPRESSED2] = "COMPRESSED2",
[PERF_RECORD_BPF_METADATA] = "BPF_METADATA",
+ [PERF_RECORD_SCHEDSTAT_CPU] = "SCHEDSTAT_CPU",
+ [PERF_RECORD_SCHEDSTAT_DOMAIN] = "SCHEDSTAT_DOMAIN",
};
const char *perf_event__name(unsigned int id)
@@ -570,6 +572,44 @@ size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *ma
return ret;
}
+size_t perf_event__fprintf_schedstat_cpu(union perf_event *event, FILE *fp)
+{
+ struct perf_record_schedstat_cpu *cs = &event->schedstat_cpu;
+ size_t size = fprintf(fp, "\ncpu%u ", cs->cpu);
+ __u16 version = cs->version;
+
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ size += fprintf(fp, "%" PRIu64 " ", (unsigned long)cs->_ver._name)
+
+ if (version == 15) {
+#include <perf/schedstat-v15.h>
+ return size;
+ }
+#undef CPU_FIELD
+
+ return fprintf(fp, "Unsupported /proc/schedstat version %d.\n",
+ event->schedstat_cpu.version);
+}
+
+size_t perf_event__fprintf_schedstat_domain(union perf_event *event, FILE *fp)
+{
+ struct perf_record_schedstat_domain *ds = &event->schedstat_domain;
+ __u16 version = ds->version;
+ size_t size = fprintf(fp, "\ndomain%u ", ds->domain);
+
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
+ size += fprintf(fp, "%" PRIu64 " ", (unsigned long)ds->_ver._name)
+
+ if (version == 15) {
+#include <perf/schedstat-v15.h>
+ return size;
+ }
+#undef DOMAIN_FIELD
+
+ return fprintf(fp, "Unsupported /proc/schedstat version %d.\n",
+ event->schedstat_domain.version);
+}
+
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index e40d16d3246c..5ab8ecc3e1ba 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -372,6 +372,8 @@ size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf_metadata(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine,FILE *fp);
+size_t perf_event__fprintf_schedstat_cpu(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_schedstat_domain(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp);
int kallsyms__get_function_start(const char *kallsyms_filename,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 26ae078278cd..0d5aa6345223 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -697,6 +697,20 @@ static void perf_event__time_conv_swap(union perf_event *event,
}
}
+static void
+perf_event__schedstat_cpu_swap(union perf_event *event __maybe_unused,
+ bool sample_id_all __maybe_unused)
+{
+ /* FIXME */
+}
+
+static void
+perf_event__schedstat_domain_swap(union perf_event *event __maybe_unused,
+ bool sample_id_all __maybe_unused)
+{
+ /* FIXME */
+}
+
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
@@ -735,6 +749,8 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
[PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
+ [PERF_RECORD_SCHEDSTAT_CPU] = perf_event__schedstat_cpu_swap,
+ [PERF_RECORD_SCHEDSTAT_DOMAIN] = perf_event__schedstat_domain_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@@ -1499,6 +1515,12 @@ static s64 perf_session__process_user_event(struct perf_session *session,
case PERF_RECORD_BPF_METADATA:
err = tool->bpf_metadata(session, event);
break;
+ case PERF_RECORD_SCHEDSTAT_CPU:
+ err = tool->schedstat_cpu(session, event);
+ break;
+ case PERF_RECORD_SCHEDSTAT_DOMAIN:
+ err = tool->schedstat_domain(session, event);
+ break;
default:
err = -EINVAL;
break;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index fcd1fd13c30e..01135d80fde5 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -2529,3 +2529,182 @@ int parse_synth_opt(char *synth)
return ret;
}
+
+static union perf_event *__synthesize_schedstat_cpu(struct io *io, __u16 version,
+ __u64 *cpu, __u64 timestamp)
+{
+ struct perf_record_schedstat_cpu *cs;
+ union perf_event *event;
+ size_t size;
+ char ch;
+
+ size = sizeof(*cs);
+ size = PERF_ALIGN(size, sizeof(u64));
+ event = zalloc(size);
+
+ if (!event)
+ return NULL;
+
+ cs = &event->schedstat_cpu;
+ cs->header.type = PERF_RECORD_SCHEDSTAT_CPU;
+ cs->header.size = size;
+ cs->timestamp = timestamp;
+
+ if (io__get_char(io) != 'p' || io__get_char(io) != 'u')
+ goto out_cpu;
+
+ if (io__get_dec(io, (__u64 *)cpu) != ' ')
+ goto out_cpu;
+
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ do { \
+ __u64 _tmp; \
+ ch = io__get_dec(io, &_tmp); \
+ if (ch != ' ' && ch != '\n') \
+ goto out_cpu; \
+ cs->_ver._name = _tmp; \
+ } while (0)
+
+ if (version == 15) {
+#include <perf/schedstat-v15.h>
+ }
+#undef CPU_FIELD
+
+ cs->cpu = *cpu;
+ cs->version = version;
+
+ return event;
+out_cpu:
+ free(event);
+ return NULL;
+}
+
+static union perf_event *__synthesize_schedstat_domain(struct io *io, __u16 version,
+ __u64 cpu, __u64 timestamp)
+{
+ struct perf_record_schedstat_domain *ds;
+ union perf_event *event = NULL;
+ __u64 d_num;
+ size_t size;
+ char ch;
+
+ if (io__get_char(io) != 'o' || io__get_char(io) != 'm' || io__get_char(io) != 'a' ||
+ io__get_char(io) != 'i' || io__get_char(io) != 'n')
+ return NULL;
+
+ ch = io__get_dec(io, &d_num);
+
+ /* Skip cpumask as it can be extracted from perf header */
+ while (io__get_char(io) != ' ')
+ continue;
+
+ size = sizeof(*ds);
+ size = PERF_ALIGN(size, sizeof(u64));
+ event = zalloc(size);
+
+ ds = &event->schedstat_domain;
+ ds->header.type = PERF_RECORD_SCHEDSTAT_DOMAIN;
+ ds->header.size = size;
+ ds->version = version;
+ ds->timestamp = timestamp;
+ ds->domain = d_num;
+
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
+ do { \
+ __u64 _tmp; \
+ ch = io__get_dec(io, &_tmp); \
+ if (ch != ' ' && ch != '\n') \
+ goto out_domain; \
+ ds->_ver._name = _tmp; \
+ } while (0)
+
+ if (version == 15) {
+#include <perf/schedstat-v15.h>
+ }
+#undef DOMAIN_FIELD
+
+ ds->cpu = cpu;
+ goto out;
+
+out_domain:
+ free(event);
+ event = NULL;
+out:
+ return event;
+}
+
+int perf_event__synthesize_schedstat(const struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct perf_cpu_map *user_requested_cpus)
+{
+ char *line = NULL, path[PATH_MAX];
+ union perf_event *event = NULL;
+ size_t line_len = 0;
+ char bf[BUFSIZ];
+ __u64 timestamp;
+ __u64 cpu = -1;
+ __u16 version;
+ struct io io;
+ int ret = -1;
+ char ch;
+
+ snprintf(path, PATH_MAX, "%s/schedstat", procfs__mountpoint());
+ io.fd = open(path, O_RDONLY, 0);
+ if (io.fd < 0) {
+ pr_err("Failed to open %s. Possibly CONFIG_SCHEDSTAT is disabled.\n", path);
+ return -1;
+ }
+ io__init(&io, io.fd, bf, sizeof(bf));
+
+ if (io__getline(&io, &line, &line_len) < 0 || !line_len)
+ goto out;
+
+ if (!strcmp(line, "version 15\n")) {
+ version = 15;
+ } else {
+ pr_err("Unsupported %s version: %s", path, line + 8);
+ goto out_free_line;
+ }
+
+ if (io__getline(&io, &line, &line_len) < 0 || !line_len)
+ goto out_free_line;
+ timestamp = atol(line + 10);
+
+ /*
+ * FIXME: Can be optimized a bit by not synthesizing domain samples
+ * for filtered out cpus.
+ */
+ for (ch = io__get_char(&io); !io.eof; ch = io__get_char(&io)) {
+ struct perf_cpu this_cpu;
+
+ if (ch == 'c') {
+ event = __synthesize_schedstat_cpu(&io, version,
+ &cpu, timestamp);
+ } else if (ch == 'd') {
+ event = __synthesize_schedstat_domain(&io, version,
+ cpu, timestamp);
+ }
+ if (!event)
+ goto out_free_line;
+
+ this_cpu.cpu = cpu;
+
+ if (user_requested_cpus && !perf_cpu_map__has(user_requested_cpus, this_cpu))
+ continue;
+
+ if (process(tool, event, NULL, NULL) < 0) {
+ free(event);
+ goto out_free_line;
+ }
+
+ free(event);
+ }
+
+ ret = 0;
+
+out_free_line:
+ free(line);
+out:
+ close(io.fd);
+ return ret;
+}
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index ee29615d68e5..b20ffe7d840d 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -143,4 +143,7 @@ int perf_event__synthesize_for_pipe(const struct perf_tool *tool,
struct perf_data *data,
perf_event__handler_t process);
+int perf_event__synthesize_schedstat(const struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct perf_cpu_map *user_requested_cpu);
#endif // __PERF_SYNTHETIC_EVENTS_H
diff --git a/tools/perf/util/tool.c b/tools/perf/util/tool.c
index e83c7ababc2a..d17e5d356b3c 100644
--- a/tools/perf/util/tool.c
+++ b/tools/perf/util/tool.c
@@ -245,7 +245,23 @@ static int perf_event__process_bpf_metadata_stub(struct perf_session *perf_sessi
{
if (dump_trace)
perf_event__fprintf_bpf_metadata(event, stdout);
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+static int process_schedstat_cpu_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_schedstat_cpu(event, stdout);
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+static int process_schedstat_domain_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_schedstat_domain(event, stdout);
dump_printf(": unhandled!\n");
return 0;
}
@@ -307,6 +323,8 @@ void perf_tool__init(struct perf_tool *tool, bool ordered_events)
#endif
tool->finished_init = process_event_op2_stub;
tool->bpf_metadata = perf_event__process_bpf_metadata_stub;
+ tool->schedstat_cpu = process_schedstat_cpu_stub;
+ tool->schedstat_domain = process_schedstat_domain_stub;
}
bool perf_tool__compressed_is_stub(const struct perf_tool *tool)
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 18b76ff0f26a..d0ea049b5f6f 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -78,7 +78,9 @@ struct perf_tool {
stat_round,
feature,
finished_init,
- bpf_metadata;
+ bpf_metadata,
+ schedstat_cpu,
+ schedstat_domain;
event_op4 compressed;
event_op3 auxtrace;
bool ordered_events;
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 05/11] perf sched stats: Add schedstat v16 support
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (3 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 04/11] perf sched stats: Add record and rawdump support Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 06/11] perf sched stats: Add schedstat v17 support Swapnil Sapkal
` (5 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das, James Clark
The /proc/schedstat file output is standardized with a version number.
Add support for recording and raw-dumping the v16 version layout.
Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Tested-by: James Clark <james.clark@linaro.org>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/lib/perf/Makefile | 2 +-
tools/lib/perf/include/perf/event.h | 14 ++
tools/lib/perf/include/perf/schedstat-v16.h | 146 ++++++++++++++++++++
tools/perf/util/event.c | 6 +
tools/perf/util/synthetic-events.c | 6 +
5 files changed, 173 insertions(+), 1 deletion(-)
create mode 100644 tools/lib/perf/include/perf/schedstat-v16.h
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 9fa28e512ca8..965e066fd780 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -179,7 +179,7 @@ install_lib: libs
cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
HDRS := bpf_perf.h core.h cpumap.h threadmap.h evlist.h evsel.h event.h mmap.h
-HDRS += schedstat-v15.h
+HDRS += schedstat-v15.h schedstat-v16.h
INTERNAL_HDRS := cpumap.h evlist.h evsel.h lib.h mmap.h rc_check.h threadmap.h xyarray.h
INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/perf
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 7ed7bae73b55..4c38a58e19c3 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -489,6 +489,12 @@ struct perf_record_schedstat_cpu_v15 {
#undef CPU_FIELD
};
+struct perf_record_schedstat_cpu_v16 {
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) _type _name
+#include "schedstat-v16.h"
+#undef CPU_FIELD
+};
+
struct perf_record_schedstat_cpu {
struct perf_event_header header;
__u64 timestamp;
@@ -498,6 +504,7 @@ struct perf_record_schedstat_cpu {
char __pad[2];
union {
struct perf_record_schedstat_cpu_v15 v15;
+ struct perf_record_schedstat_cpu_v16 v16;
};
};
@@ -507,6 +514,12 @@ struct perf_record_schedstat_domain_v15 {
#undef DOMAIN_FIELD
};
+struct perf_record_schedstat_domain_v16 {
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) _type _name
+#include "schedstat-v16.h"
+#undef DOMAIN_FIELD
+};
+
#define DOMAIN_NAME_LEN 16
struct perf_record_schedstat_domain {
@@ -517,6 +530,7 @@ struct perf_record_schedstat_domain {
__u16 domain;
union {
struct perf_record_schedstat_domain_v15 v15;
+ struct perf_record_schedstat_domain_v16 v16;
};
};
diff --git a/tools/lib/perf/include/perf/schedstat-v16.h b/tools/lib/perf/include/perf/schedstat-v16.h
new file mode 100644
index 000000000000..3462b79c29af
--- /dev/null
+++ b/tools/lib/perf/include/perf/schedstat-v16.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CPU_FIELD
+CPU_FIELD(__u32, yld_count, "sched_yield() count",
+ "%11u", false, yld_count, v16);
+CPU_FIELD(__u32, array_exp, "Legacy counter can be ignored",
+ "%11u", false, array_exp, v16);
+CPU_FIELD(__u32, sched_count, "schedule() called",
+ "%11u", false, sched_count, v16);
+CPU_FIELD(__u32, sched_goidle, "schedule() left the processor idle",
+ "%11u", true, sched_count, v16);
+CPU_FIELD(__u32, ttwu_count, "try_to_wake_up() was called",
+ "%11u", false, ttwu_count, v16);
+CPU_FIELD(__u32, ttwu_local, "try_to_wake_up() was called to wake up the local cpu",
+ "%11u", true, ttwu_count, v16);
+CPU_FIELD(__u64, rq_cpu_time, "total runtime by tasks on this processor (in jiffies)",
+ "%11llu", false, rq_cpu_time, v16);
+CPU_FIELD(__u64, run_delay, "total waittime by tasks on this processor (in jiffies)",
+ "%11llu", true, rq_cpu_time, v16);
+CPU_FIELD(__u64, pcount, "total timeslices run on this cpu",
+ "%11llu", false, pcount, v16);
+#endif /* CPU_FIELD */
+
+#ifdef DOMAIN_FIELD
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category busy> ");
+#endif
+DOMAIN_FIELD(__u32, busy_lb_count,
+ "load_balance() count on cpu busy", "%11u", true, v16);
+DOMAIN_FIELD(__u32, busy_lb_balanced,
+ "load_balance() found balanced on cpu busy", "%11u", true, v16);
+DOMAIN_FIELD(__u32, busy_lb_failed,
+ "load_balance() move task failed on cpu busy", "%11u", true, v16);
+DOMAIN_FIELD(__u32, busy_lb_imbalance,
+ "imbalance sum on cpu busy", "%11u", false, v16);
+DOMAIN_FIELD(__u32, busy_lb_gained,
+ "pull_task() count on cpu busy", "%11u", false, v16);
+DOMAIN_FIELD(__u32, busy_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu busy", "%11u", false, v16);
+DOMAIN_FIELD(__u32, busy_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu busy", "%11u", true, v16);
+DOMAIN_FIELD(__u32, busy_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu busy", "%11u", true, v16);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(busy_lb_success_count, "load_balance() success count on cpu busy", "%11u",
+ busy_lb_count, busy_lb_balanced, busy_lb_failed, v16);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(busy_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu busy)", "%11.2Lf",
+ busy_lb_count, busy_lb_balanced, busy_lb_failed, busy_lb_gained, v16);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category idle> ");
+#endif
+DOMAIN_FIELD(__u32, idle_lb_count,
+ "load_balance() count on cpu idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, idle_lb_balanced,
+ "load_balance() found balanced on cpu idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, idle_lb_failed,
+ "load_balance() move task failed on cpu idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, idle_lb_imbalance,
+ "imbalance sum on cpu idle", "%11u", false, v16);
+DOMAIN_FIELD(__u32, idle_lb_gained,
+ "pull_task() count on cpu idle", "%11u", false, v16);
+DOMAIN_FIELD(__u32, idle_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu idle", "%11u", false, v16);
+DOMAIN_FIELD(__u32, idle_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, idle_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu idle", "%11u", true, v16);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(idle_lb_success_count, "load_balance() success count on cpu idle", "%11u",
+ idle_lb_count, idle_lb_balanced, idle_lb_failed, v16);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(idle_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu idle)", "%11.2Lf",
+ idle_lb_count, idle_lb_balanced, idle_lb_failed, idle_lb_gained, v16);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category newidle> ");
+#endif
+DOMAIN_FIELD(__u32, newidle_lb_count,
+ "load_balance() count on cpu newly idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, newidle_lb_balanced,
+ "load_balance() found balanced on cpu newly idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, newidle_lb_failed,
+ "load_balance() move task failed on cpu newly idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, newidle_lb_imbalance,
+ "imbalance sum on cpu newly idle", "%11u", false, v16);
+DOMAIN_FIELD(__u32, newidle_lb_gained,
+ "pull_task() count on cpu newly idle", "%11u", false, v16);
+DOMAIN_FIELD(__u32, newidle_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu newly idle", "%11u", false, v16);
+DOMAIN_FIELD(__u32, newidle_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu newly idle", "%11u", true, v16);
+DOMAIN_FIELD(__u32, newidle_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu newly idle", "%11u", true, v16);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(newidle_lb_success_count,
+ "load_balance() success count on cpu newly idle", "%11u",
+ newidle_lb_count, newidle_lb_balanced, newidle_lb_failed, v16);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(newidle_lb_avg_count,
+ "avg task pulled per successful lb attempt (cpu newly idle)", "%11.2Lf",
+ newidle_lb_count, newidle_lb_balanced, newidle_lb_failed, newidle_lb_gained, v16);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category active_load_balance()> ");
+#endif
+DOMAIN_FIELD(__u32, alb_count,
+ "active_load_balance() count", "%11u", false, v16);
+DOMAIN_FIELD(__u32, alb_failed,
+ "active_load_balance() move task failed", "%11u", false, v16);
+DOMAIN_FIELD(__u32, alb_pushed,
+ "active_load_balance() successfully moved a task", "%11u", false, v16);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category sched_balance_exec()> ");
+#endif
+DOMAIN_FIELD(__u32, sbe_count,
+ "sbe_count is not used", "%11u", false, v16);
+DOMAIN_FIELD(__u32, sbe_balanced,
+ "sbe_balanced is not used", "%11u", false, v16);
+DOMAIN_FIELD(__u32, sbe_pushed,
+ "sbe_pushed is not used", "%11u", false, v16);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category sched_balance_fork()> ");
+#endif
+DOMAIN_FIELD(__u32, sbf_count,
+ "sbf_count is not used", "%11u", false, v16);
+DOMAIN_FIELD(__u32, sbf_balanced,
+ "sbf_balanced is not used", "%11u", false, v16);
+DOMAIN_FIELD(__u32, sbf_pushed,
+ "sbf_pushed is not used", "%11u", false, v16);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Wakeup Info> ");
+#endif
+DOMAIN_FIELD(__u32, ttwu_wake_remote,
+ "try_to_wake_up() awoke a task that last ran on a diff cpu", "%11u", false, v16);
+DOMAIN_FIELD(__u32, ttwu_move_affine,
+ "try_to_wake_up() moved task because cache-cold on own cpu", "%11u", false, v16);
+DOMAIN_FIELD(__u32, ttwu_move_balance,
+ "try_to_wake_up() started passive balancing", "%11u", false, v16);
+#endif /* DOMAIN_FIELD */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 41fb942ef701..88a5dad240ac 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -584,6 +584,9 @@ size_t perf_event__fprintf_schedstat_cpu(union perf_event *event, FILE *fp)
if (version == 15) {
#include <perf/schedstat-v15.h>
return size;
+ } else if (version == 16) {
+#include <perf/schedstat-v16.h>
+ return size;
}
#undef CPU_FIELD
@@ -603,6 +606,9 @@ size_t perf_event__fprintf_schedstat_domain(union perf_event *event, FILE *fp)
if (version == 15) {
#include <perf/schedstat-v15.h>
return size;
+ } else if (version == 16) {
+#include <perf/schedstat-v16.h>
+ return size;
}
#undef DOMAIN_FIELD
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 01135d80fde5..499c819bb8ff 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -2567,6 +2567,8 @@ static union perf_event *__synthesize_schedstat_cpu(struct io *io, __u16 version
if (version == 15) {
#include <perf/schedstat-v15.h>
+ } else if (version == 16) {
+#include <perf/schedstat-v16.h>
}
#undef CPU_FIELD
@@ -2620,6 +2622,8 @@ static union perf_event *__synthesize_schedstat_domain(struct io *io, __u16 vers
if (version == 15) {
#include <perf/schedstat-v15.h>
+ } else if (version == 16) {
+#include <perf/schedstat-v16.h>
}
#undef DOMAIN_FIELD
@@ -2661,6 +2665,8 @@ int perf_event__synthesize_schedstat(const struct perf_tool *tool,
if (!strcmp(line, "version 15\n")) {
version = 15;
+ } else if (!strcmp(line, "version 16\n")) {
+ version = 16;
} else {
pr_err("Unsupported %s version: %s", path, line + 8);
goto out_free_line;
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 06/11] perf sched stats: Add schedstat v17 support
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (4 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 05/11] perf sched stats: Add schedstat v16 support Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 07/11] perf sched stats: Add support for report subcommand Swapnil Sapkal
` (4 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
The /proc/schedstat file output is standardized with a version number.
Add support for recording and raw-dumping the v17 version layout.
Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/lib/perf/Makefile | 2 +-
tools/lib/perf/include/perf/event.h | 14 ++
tools/lib/perf/include/perf/schedstat-v17.h | 164 ++++++++++++++++++++
tools/perf/util/event.c | 6 +
tools/perf/util/synthetic-events.c | 11 ++
5 files changed, 196 insertions(+), 1 deletion(-)
create mode 100644 tools/lib/perf/include/perf/schedstat-v17.h
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 965e066fd780..27e6490f64dc 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -179,7 +179,7 @@ install_lib: libs
cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
HDRS := bpf_perf.h core.h cpumap.h threadmap.h evlist.h evsel.h event.h mmap.h
-HDRS += schedstat-v15.h schedstat-v16.h
+HDRS += schedstat-v15.h schedstat-v16.h schedstat-v17.h
INTERNAL_HDRS := cpumap.h evlist.h evsel.h lib.h mmap.h rc_check.h threadmap.h xyarray.h
INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/perf
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 4c38a58e19c3..5a93a84f49f8 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -495,6 +495,12 @@ struct perf_record_schedstat_cpu_v16 {
#undef CPU_FIELD
};
+struct perf_record_schedstat_cpu_v17 {
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) _type _name
+#include "schedstat-v17.h"
+#undef CPU_FIELD
+};
+
struct perf_record_schedstat_cpu {
struct perf_event_header header;
__u64 timestamp;
@@ -505,6 +511,7 @@ struct perf_record_schedstat_cpu {
union {
struct perf_record_schedstat_cpu_v15 v15;
struct perf_record_schedstat_cpu_v16 v16;
+ struct perf_record_schedstat_cpu_v17 v17;
};
};
@@ -520,6 +527,12 @@ struct perf_record_schedstat_domain_v16 {
#undef DOMAIN_FIELD
};
+struct perf_record_schedstat_domain_v17 {
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) _type _name
+#include "schedstat-v17.h"
+#undef DOMAIN_FIELD
+};
+
#define DOMAIN_NAME_LEN 16
struct perf_record_schedstat_domain {
@@ -531,6 +544,7 @@ struct perf_record_schedstat_domain {
union {
struct perf_record_schedstat_domain_v15 v15;
struct perf_record_schedstat_domain_v16 v16;
+ struct perf_record_schedstat_domain_v17 v17;
};
};
diff --git a/tools/lib/perf/include/perf/schedstat-v17.h b/tools/lib/perf/include/perf/schedstat-v17.h
new file mode 100644
index 000000000000..865dc7c1039c
--- /dev/null
+++ b/tools/lib/perf/include/perf/schedstat-v17.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CPU_FIELD
+CPU_FIELD(__u32, yld_count, "sched_yield() count",
+ "%11u", false, yld_count, v17);
+CPU_FIELD(__u32, array_exp, "Legacy counter can be ignored",
+ "%11u", false, array_exp, v17);
+CPU_FIELD(__u32, sched_count, "schedule() called",
+ "%11u", false, sched_count, v17);
+CPU_FIELD(__u32, sched_goidle, "schedule() left the processor idle",
+ "%11u", true, sched_count, v17);
+CPU_FIELD(__u32, ttwu_count, "try_to_wake_up() was called",
+ "%11u", false, ttwu_count, v17);
+CPU_FIELD(__u32, ttwu_local, "try_to_wake_up() was called to wake up the local cpu",
+ "%11u", true, ttwu_count, v17);
+CPU_FIELD(__u64, rq_cpu_time, "total runtime by tasks on this processor (in jiffies)",
+ "%11llu", false, rq_cpu_time, v17);
+CPU_FIELD(__u64, run_delay, "total waittime by tasks on this processor (in jiffies)",
+ "%11llu", true, rq_cpu_time, v17);
+CPU_FIELD(__u64, pcount, "total timeslices run on this cpu",
+ "%11llu", false, pcount, v17);
+#endif /* CPU_FIELD */
+
+#ifdef DOMAIN_FIELD
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category busy> ");
+#endif
+DOMAIN_FIELD(__u32, busy_lb_count,
+ "load_balance() count on cpu busy", "%11u", true, v17);
+DOMAIN_FIELD(__u32, busy_lb_balanced,
+ "load_balance() found balanced on cpu busy", "%11u", true, v17);
+DOMAIN_FIELD(__u32, busy_lb_failed,
+ "load_balance() move task failed on cpu busy", "%11u", true, v17);
+DOMAIN_FIELD(__u32, busy_lb_imbalance_load,
+ "imbalance in load on cpu busy", "%11u", false, v17);
+DOMAIN_FIELD(__u32, busy_lb_imbalance_util,
+ "imbalance in utilization on cpu busy", "%11u", false, v17);
+DOMAIN_FIELD(__u32, busy_lb_imbalance_task,
+ "imbalance in number of tasks on cpu busy", "%11u", false, v17);
+DOMAIN_FIELD(__u32, busy_lb_imbalance_misfit,
+ "imbalance in misfit tasks on cpu busy", "%11u", false, v17);
+DOMAIN_FIELD(__u32, busy_lb_gained,
+ "pull_task() count on cpu busy", "%11u", false, v17);
+DOMAIN_FIELD(__u32, busy_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu busy", "%11u", false, v17);
+DOMAIN_FIELD(__u32, busy_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu busy", "%11u", true, v17);
+DOMAIN_FIELD(__u32, busy_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu busy", "%11u", true, v17);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(busy_lb_success_count, "load_balance() success count on cpu busy", "%11u",
+ busy_lb_count, busy_lb_balanced, busy_lb_failed, v17);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(busy_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu busy)", "%11.2Lf",
+ busy_lb_count, busy_lb_balanced, busy_lb_failed, busy_lb_gained, v17);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category idle> ");
+#endif
+DOMAIN_FIELD(__u32, idle_lb_count,
+ "load_balance() count on cpu idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, idle_lb_balanced,
+ "load_balance() found balanced on cpu idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, idle_lb_failed,
+ "load_balance() move task failed on cpu idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, idle_lb_imbalance_load,
+ "imbalance in load on cpu idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, idle_lb_imbalance_util,
+ "imbalance in utilization on cpu idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, idle_lb_imbalance_task,
+ "imbalance in number of tasks on cpu idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, idle_lb_imbalance_misfit,
+ "imbalance in misfit tasks on cpu idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, idle_lb_gained,
+ "pull_task() count on cpu idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, idle_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, idle_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, idle_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu idle", "%11u", true, v17);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(idle_lb_success_count, "load_balance() success count on cpu idle", "%11u",
+ idle_lb_count, idle_lb_balanced, idle_lb_failed, v17);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(idle_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu idle)", "%11.2Lf",
+ idle_lb_count, idle_lb_balanced, idle_lb_failed, idle_lb_gained, v17);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category newidle> ");
+#endif
+DOMAIN_FIELD(__u32, newidle_lb_count,
+ "load_balance() count on cpu newly idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, newidle_lb_balanced,
+ "load_balance() found balanced on cpu newly idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, newidle_lb_failed,
+ "load_balance() move task failed on cpu newly idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, newidle_lb_imbalance_load,
+ "imbalance in load on cpu newly idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, newidle_lb_imbalance_util,
+ "imbalance in utilization on cpu newly idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, newidle_lb_imbalance_task,
+ "imbalance in number of tasks on cpu newly idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, newidle_lb_imbalance_misfit,
+ "imbalance in misfit tasks on cpu newly idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, newidle_lb_gained,
+ "pull_task() count on cpu newly idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, newidle_lb_hot_gained,
+ "pull_task() when target task was cache-hot on cpu newly idle", "%11u", false, v17);
+DOMAIN_FIELD(__u32, newidle_lb_nobusyq,
+ "load_balance() failed to find busier queue on cpu newly idle", "%11u", true, v17);
+DOMAIN_FIELD(__u32, newidle_lb_nobusyg,
+ "load_balance() failed to find busier group on cpu newly idle", "%11u", true, v17);
+#ifdef DERIVED_CNT_FIELD
+DERIVED_CNT_FIELD(newidle_lb_success_count,
+ "load_balance() success count on cpu newly idle", "%11u",
+ newidle_lb_count, newidle_lb_balanced, newidle_lb_failed, v17);
+#endif
+#ifdef DERIVED_AVG_FIELD
+DERIVED_AVG_FIELD(newidle_lb_avg_pulled,
+ "avg task pulled per successful lb attempt (cpu newly idle)", "%11.2Lf",
+ newidle_lb_count, newidle_lb_balanced, newidle_lb_failed, newidle_lb_gained, v17);
+#endif
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category active_load_balance()> ");
+#endif
+DOMAIN_FIELD(__u32, alb_count,
+ "active_load_balance() count", "%11u", false, v17);
+DOMAIN_FIELD(__u32, alb_failed,
+ "active_load_balance() move task failed", "%11u", false, v17);
+DOMAIN_FIELD(__u32, alb_pushed,
+ "active_load_balance() successfully moved a task", "%11u", false, v17);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category sched_balance_exec()> ");
+#endif
+DOMAIN_FIELD(__u32, sbe_count,
+ "sbe_count is not used", "%11u", false, v17);
+DOMAIN_FIELD(__u32, sbe_balanced,
+ "sbe_balanced is not used", "%11u", false, v17);
+DOMAIN_FIELD(__u32, sbe_pushed,
+ "sbe_pushed is not used", "%11u", false, v17);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Category sched_balance_fork()> ");
+#endif
+DOMAIN_FIELD(__u32, sbf_count,
+ "sbf_count is not used", "%11u", false, v17);
+DOMAIN_FIELD(__u32, sbf_balanced,
+ "sbf_balanced is not used", "%11u", false, v17);
+DOMAIN_FIELD(__u32, sbf_pushed,
+ "sbf_pushed is not used", "%11u", false, v17);
+#ifdef DOMAIN_CATEGORY
+DOMAIN_CATEGORY(" <Wakeup Info> ");
+#endif
+DOMAIN_FIELD(__u32, ttwu_wake_remote,
+ "try_to_wake_up() awoke a task that last ran on a diff cpu", "%11u", false, v17);
+DOMAIN_FIELD(__u32, ttwu_move_affine,
+ "try_to_wake_up() moved task because cache-cold on own cpu", "%11u", false, v17);
+DOMAIN_FIELD(__u32, ttwu_move_balance,
+ "try_to_wake_up() started passive balancing", "%11u", false, v17);
+#endif /* DOMAIN_FIELD */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 88a5dad240ac..d50fd1c00dfc 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -587,6 +587,9 @@ size_t perf_event__fprintf_schedstat_cpu(union perf_event *event, FILE *fp)
} else if (version == 16) {
#include <perf/schedstat-v16.h>
return size;
+ } else if (version == 17) {
+#include <perf/schedstat-v17.h>
+ return size;
}
#undef CPU_FIELD
@@ -609,6 +612,9 @@ size_t perf_event__fprintf_schedstat_domain(union perf_event *event, FILE *fp)
} else if (version == 16) {
#include <perf/schedstat-v16.h>
return size;
+ } else if (version == 17) {
+#include <perf/schedstat-v17.h>
+ return size;
}
#undef DOMAIN_FIELD
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 499c819bb8ff..0c97ed711b60 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -2569,6 +2569,8 @@ static union perf_event *__synthesize_schedstat_cpu(struct io *io, __u16 version
#include <perf/schedstat-v15.h>
} else if (version == 16) {
#include <perf/schedstat-v16.h>
+ } else if (version == 17) {
+#include <perf/schedstat-v17.h>
}
#undef CPU_FIELD
@@ -2595,6 +2597,11 @@ static union perf_event *__synthesize_schedstat_domain(struct io *io, __u16 vers
return NULL;
ch = io__get_dec(io, &d_num);
+ if (version >= 17) {
+ /* Skip domain name as it can be extracted from perf header */
+ while (io__get_char(io) != ' ')
+ continue;
+ }
/* Skip cpumask as it can be extracted from perf header */
while (io__get_char(io) != ' ')
@@ -2624,6 +2631,8 @@ static union perf_event *__synthesize_schedstat_domain(struct io *io, __u16 vers
#include <perf/schedstat-v15.h>
} else if (version == 16) {
#include <perf/schedstat-v16.h>
+ } else if (version == 17) {
+#include <perf/schedstat-v17.h>
}
#undef DOMAIN_FIELD
@@ -2667,6 +2676,8 @@ int perf_event__synthesize_schedstat(const struct perf_tool *tool,
version = 15;
} else if (!strcmp(line, "version 16\n")) {
version = 16;
+ } else if (!strcmp(line, "version 17\n")) {
+ version = 17;
} else {
pr_err("Unsupported %s version: %s", path, line + 8);
goto out_free_line;
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 07/11] perf sched stats: Add support for report subcommand
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (5 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 06/11] perf sched stats: Add schedstat v17 support Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 08/11] perf sched stats: Add support for live mode Swapnil Sapkal
` (3 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das, James Clark
`perf sched stats record` captures two sets of samples. For a workload
profile, the first set is taken right before the workload starts and the
second set after the workload finishes. For the systemwide profile, the
first set is taken at the beginning of the profile and the second set on
receiving the SIGINT signal.
Add a `perf sched stats report` subcommand that will read both sets
of samples, compute the diff and render a final report. The final report
prints scheduler stats at cpu granularity as well as sched domain granularity.
Example usage:
# perf sched stats record
# perf sched stats report
Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Tested-by: James Clark <james.clark@linaro.org>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/perf/builtin-sched.c | 509 ++++++++++++++++++++++++++++++++++++-
1 file changed, 508 insertions(+), 1 deletion(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 2573491fa5f8..e23018798f5b 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -3940,6 +3940,505 @@ static int perf_sched__schedstat_record(struct perf_sched *sched,
return err;
}
+struct schedstat_domain {
+ struct list_head domain_list;
+ struct perf_record_schedstat_domain *domain_data;
+};
+
+struct schedstat_cpu {
+ struct list_head cpu_list;
+ struct list_head domain_head;
+ struct perf_record_schedstat_cpu *cpu_data;
+};
+
+static struct list_head cpu_head = LIST_HEAD_INIT(cpu_head);
+static struct schedstat_cpu *cpu_second_pass;
+static struct schedstat_domain *domain_second_pass;
+static bool after_workload_flag;
+static bool verbose_field;
+
+static void store_schedtstat_cpu_diff(struct schedstat_cpu *after_workload)
+{
+ struct perf_record_schedstat_cpu *before = cpu_second_pass->cpu_data;
+ struct perf_record_schedstat_cpu *after = after_workload->cpu_data;
+ __u16 version = after_workload->cpu_data->version;
+
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ (before->_ver._name = after->_ver._name - before->_ver._name)
+
+ if (version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+
+#undef CPU_FIELD
+}
+
+static void store_schedstat_domain_diff(struct schedstat_domain *after_workload)
+{
+ struct perf_record_schedstat_domain *before = domain_second_pass->domain_data;
+ struct perf_record_schedstat_domain *after = after_workload->domain_data;
+ __u16 version = after_workload->domain_data->version;
+
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
+ (before->_ver._name = after->_ver._name - before->_ver._name)
+
+ if (version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+#undef DOMAIN_FIELD
+}
+
+static inline void print_cpu_stats(struct perf_record_schedstat_cpu *cs)
+{
+ printf("%-65s %12s %12s\n", "DESC", "COUNT", "PCT_CHANGE");
+ printf("%.*s\n", 100, graph_dotted_line);
+
+#define CALC_PCT(_x, _y) ((_y) ? ((double)(_x) / (_y)) * 100 : 0.0)
+
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ do { \
+ printf("%-65s: " _format, verbose_field ? _desc : #_name, \
+ cs->_ver._name); \
+ if (_is_pct) { \
+ printf(" ( %8.2lf%% )", \
+ CALC_PCT(cs->_ver._name, cs->_ver._pct_of)); \
+ } \
+ printf("\n"); \
+ } while (0)
+
+ if (cs->version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (cs->version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (cs->version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+
+#undef CPU_FIELD
+#undef CALC_PCT
+}
+
+static inline void print_domain_stats(struct perf_record_schedstat_domain *ds,
+ __u64 jiffies)
+{
+ printf("%-65s %12s %14s\n", "DESC", "COUNT", "AVG_JIFFIES");
+
+#define DOMAIN_CATEGORY(_desc) \
+ do { \
+ size_t _len = strlen(_desc); \
+ size_t _pre_dash_cnt = (100 - _len) / 2; \
+ size_t _post_dash_cnt = 100 - _len - _pre_dash_cnt; \
+ print_separator((int)_pre_dash_cnt, _desc, (int)_post_dash_cnt);\
+ } while (0)
+
+#define CALC_AVG(_x, _y) ((_y) ? (long double)(_x) / (_y) : 0.0)
+
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
+ do { \
+ printf("%-65s: " _format, verbose_field ? _desc : #_name, \
+ ds->_ver._name); \
+ if (_is_jiffies) { \
+ printf(" $ %11.2Lf $", \
+ CALC_AVG(jiffies, ds->_ver._name)); \
+ } \
+ printf("\n"); \
+ } while (0)
+
+#define DERIVED_CNT_FIELD(_name, _desc, _format, _x, _y, _z, _ver) \
+ printf("*%-64s: " _format "\n", verbose_field ? _desc : #_name, \
+ (ds->_ver._x) - (ds->_ver._y) - (ds->_ver._z))
+
+#define DERIVED_AVG_FIELD(_name, _desc, _format, _x, _y, _z, _w, _ver) \
+ printf("*%-64s: " _format "\n", verbose_field ? _desc : #_name, \
+ CALC_AVG(ds->_ver._w, \
+ ((ds->_ver._x) - (ds->_ver._y) - (ds->_ver._z))))
+
+ if (ds->version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (ds->version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (ds->version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+
+#undef DERIVED_AVG_FIELD
+#undef DERIVED_CNT_FIELD
+#undef DOMAIN_FIELD
+#undef CALC_AVG
+#undef DOMAIN_CATEGORY
+}
+
+static void summarize_schedstat_cpu(struct schedstat_cpu *summary_cpu,
+ struct schedstat_cpu *cptr,
+ int cnt, bool is_last)
+{
+ struct perf_record_schedstat_cpu *summary_cs = summary_cpu->cpu_data,
+ *temp_cs = cptr->cpu_data;
+
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ do { \
+ summary_cs->_ver._name += temp_cs->_ver._name; \
+ if (is_last) \
+ summary_cs->_ver._name /= cnt; \
+ } while (0)
+
+ if (cptr->cpu_data->version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (cptr->cpu_data->version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (cptr->cpu_data->version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+#undef CPU_FIELD
+}
+
+static void summarize_schedstat_domain(struct schedstat_domain *summary_domain,
+ struct schedstat_domain *dptr,
+ int cnt, bool is_last)
+{
+ struct perf_record_schedstat_domain *summary_ds = summary_domain->domain_data,
+ *temp_ds = dptr->domain_data;
+
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
+ do { \
+ summary_ds->_ver._name += temp_ds->_ver._name; \
+ if (is_last) \
+ summary_ds->_ver._name /= cnt; \
+ } while (0)
+
+ if (dptr->domain_data->version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (dptr->domain_data->version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (dptr->domain_data->version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+#undef DOMAIN_FIELD
+}
+
+static int get_all_cpu_stats(struct list_head *head)
+{
+ struct schedstat_cpu *cptr = list_first_entry(head, struct schedstat_cpu, cpu_list);
+ struct schedstat_cpu *summary_head = NULL;
+ struct perf_record_schedstat_domain *ds;
+ struct perf_record_schedstat_cpu *cs;
+ struct schedstat_domain *dptr, *tdptr;
+ bool is_last = false;
+ int cnt = 1;
+ int ret = 0;
+
+ if (cptr) {
+ summary_head = zalloc(sizeof(*summary_head));
+ if (!summary_head)
+ return -ENOMEM;
+
+ summary_head->cpu_data = zalloc(sizeof(*cs));
+ memcpy(summary_head->cpu_data, cptr->cpu_data, sizeof(*cs));
+
+ INIT_LIST_HEAD(&summary_head->domain_head);
+
+ list_for_each_entry(dptr, &cptr->domain_head, domain_list) {
+ tdptr = zalloc(sizeof(*tdptr));
+ if (!tdptr)
+ return -ENOMEM;
+
+ tdptr->domain_data = zalloc(sizeof(*ds));
+ if (!tdptr->domain_data)
+ return -ENOMEM;
+
+ memcpy(tdptr->domain_data, dptr->domain_data, sizeof(*ds));
+ list_add_tail(&tdptr->domain_list, &summary_head->domain_head);
+ }
+ }
+
+
+ list_for_each_entry(cptr, head, cpu_list) {
+ if (list_is_first(&cptr->cpu_list, head))
+ continue;
+
+ if (list_is_last(&cptr->cpu_list, head))
+ is_last = true;
+
+ cnt++;
+ summarize_schedstat_cpu(summary_head, cptr, cnt, is_last);
+ tdptr = list_first_entry(&summary_head->domain_head, struct schedstat_domain,
+ domain_list);
+
+ list_for_each_entry(dptr, &cptr->domain_head, domain_list) {
+ summarize_schedstat_domain(tdptr, dptr, cnt, is_last);
+ tdptr = list_next_entry(tdptr, domain_list);
+ }
+ }
+
+ list_add(&summary_head->cpu_list, head);
+
+ return ret;
+}
+
+static void print_field_description(struct schedstat_cpu *cptr)
+{
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ printf("%-30s-> %s\n", #_name, _desc) \
+
+#define DOMAIN_CATEGORY(_desc) \
+ do { \
+ size_t _len = strlen(_desc); \
+ size_t _pre_dash_cnt = (100 - _len) / 2; \
+ size_t _post_dash_cnt = 100 - _len - _pre_dash_cnt; \
+ print_separator((int)_pre_dash_cnt, _desc, (int)_post_dash_cnt);\
+ } while (0)
+
+#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
+ printf("%-30s-> %s\n", #_name, _desc) \
+
+#define DERIVED_CNT_FIELD(_name, _desc, _format, _x, _y, _z, _ver) \
+ printf("*%-29s-> %s\n", #_name, _desc) \
+
+#define DERIVED_AVG_FIELD(_name, _desc, _format, _x, _y, _z, _w, _ver) \
+ printf("*%-29s-> %s\n", #_name, _desc) \
+
+ if (cptr->cpu_data->version == 15) {
+#include <perf/schedstat-v15.h>
+ } else if (cptr->cpu_data->version == 16) {
+#include <perf/schedstat-v16.h>
+ } else if (cptr->cpu_data->version == 17) {
+#include <perf/schedstat-v17.h>
+ }
+#undef CPU_FIELD
+#undef DOMAIN_CATEGORY
+#undef DERIVED_CNT_FIELD
+#undef DERIVED_AVG_FIELD
+#undef DOMAIN_FIELD
+}
+
+static int show_schedstat_data(struct list_head *head, struct cpu_domain_map **cd_map)
+{
+ struct schedstat_cpu *cptr = list_first_entry(head, struct schedstat_cpu, cpu_list);
+ __u64 jiffies = cptr->cpu_data->timestamp;
+ struct perf_record_schedstat_domain *ds;
+ struct perf_record_schedstat_cpu *cs;
+ struct schedstat_domain *dptr;
+ bool is_summary = true;
+ int ret = 0;
+
+ printf("Description\n");
+ print_separator(100, "", 0);
+ printf("%-30s-> %s\n", "DESC", "Description of the field");
+ printf("%-30s-> %s\n", "COUNT", "Value of the field");
+ printf("%-30s-> %s\n", "PCT_CHANGE", "Percent change with corresponding base value");
+ printf("%-30s-> %s\n", "AVG_JIFFIES",
+ "Avg time in jiffies between two consecutive occurrence of event");
+
+ if (!verbose_field) {
+ print_separator(100, "", 0);
+ print_field_description(cptr);
+ }
+
+ print_separator(100, "", 0);
+ printf("\n");
+
+ printf("%-65s: %11llu\n", "Time elapsed (in jiffies)", jiffies);
+ print_separator(100, "", 0);
+
+ ret = get_all_cpu_stats(head);
+
+ list_for_each_entry(cptr, head, cpu_list) {
+ cs = cptr->cpu_data;
+ printf("\n");
+ print_separator(100, "", 0);
+
+ if (is_summary)
+ printf("CPU <ALL CPUS SUMMARY>\n");
+ else
+ printf("CPU %d\n", cs->cpu);
+
+ print_separator(100, "", 0);
+ print_cpu_stats(cs);
+ print_separator(100, "", 0);
+
+ list_for_each_entry(dptr, &cptr->domain_head, domain_list) {
+ struct domain_info *dinfo;
+
+ ds = dptr->domain_data;
+ dinfo = cd_map[ds->cpu]->domains[ds->domain];
+ if (is_summary)
+ if (dinfo->dname)
+ printf("CPU <ALL CPUS SUMMARY>, DOMAIN %s\n", dinfo->dname);
+ else
+ printf("CPU <ALL CPUS SUMMARY>, DOMAIN %d\n", dinfo->domain);
+ else {
+ if (dinfo->dname)
+ printf("CPU %d, DOMAIN %s CPUS ", cs->cpu, dinfo->dname);
+ else
+ printf("CPU %d, DOMAIN %d CPUS ", cs->cpu, dinfo->domain);
+
+ printf("%s\n", dinfo->cpulist);
+ }
+ print_separator(100, "", 0);
+ print_domain_stats(ds, jiffies);
+ print_separator(100, "", 0);
+ }
+ is_summary = false;
+ }
+ return ret;
+}
+
+static int perf_sched__process_schedstat(struct perf_session *session __maybe_unused,
+ union perf_event *event)
+{
+ struct perf_cpu this_cpu;
+ static __u32 initial_cpu;
+
+ switch (event->header.type) {
+ case PERF_RECORD_SCHEDSTAT_CPU:
+ this_cpu.cpu = event->schedstat_cpu.cpu;
+ break;
+ case PERF_RECORD_SCHEDSTAT_DOMAIN:
+ this_cpu.cpu = event->schedstat_domain.cpu;
+ break;
+ default:
+ return 0;
+ }
+
+ if (user_requested_cpus && !perf_cpu_map__has(user_requested_cpus, this_cpu))
+ return 0;
+
+ if (event->header.type == PERF_RECORD_SCHEDSTAT_CPU) {
+ struct schedstat_cpu *temp = zalloc(sizeof(*temp));
+
+ if (!temp)
+ return -ENOMEM;
+
+ temp->cpu_data = zalloc(sizeof(*temp->cpu_data));
+ if (!temp->cpu_data)
+ return -ENOMEM;
+
+ memcpy(temp->cpu_data, &event->schedstat_cpu, sizeof(*temp->cpu_data));
+
+ if (!list_empty(&cpu_head) && temp->cpu_data->cpu == initial_cpu)
+ after_workload_flag = true;
+
+ if (!after_workload_flag) {
+ if (list_empty(&cpu_head))
+ initial_cpu = temp->cpu_data->cpu;
+
+ list_add_tail(&temp->cpu_list, &cpu_head);
+ INIT_LIST_HEAD(&temp->domain_head);
+ } else {
+ if (temp->cpu_data->cpu == initial_cpu) {
+ cpu_second_pass = list_first_entry(&cpu_head, struct schedstat_cpu,
+ cpu_list);
+ cpu_second_pass->cpu_data->timestamp =
+ temp->cpu_data->timestamp - cpu_second_pass->cpu_data->timestamp;
+ } else {
+ cpu_second_pass = list_next_entry(cpu_second_pass, cpu_list);
+ }
+ domain_second_pass = list_first_entry(&cpu_second_pass->domain_head,
+ struct schedstat_domain, domain_list);
+ store_schedtstat_cpu_diff(temp);
+ free(temp);
+ }
+ } else if (event->header.type == PERF_RECORD_SCHEDSTAT_DOMAIN) {
+ struct schedstat_cpu *cpu_tail;
+ struct schedstat_domain *temp = zalloc(sizeof(*temp));
+
+ if (!temp)
+ return -ENOMEM;
+
+ temp->domain_data = zalloc(sizeof(*temp->domain_data));
+ if (!temp->domain_data)
+ return -ENOMEM;
+
+ memcpy(temp->domain_data, &event->schedstat_domain, sizeof(*temp->domain_data));
+
+ if (!after_workload_flag) {
+ cpu_tail = list_last_entry(&cpu_head, struct schedstat_cpu, cpu_list);
+ list_add_tail(&temp->domain_list, &cpu_tail->domain_head);
+ } else {
+ store_schedstat_domain_diff(temp);
+ domain_second_pass = list_next_entry(domain_second_pass, domain_list);
+ free(temp);
+ }
+ }
+
+ return 0;
+}
+
+static void free_schedstat(struct list_head *head)
+{
+ struct schedstat_domain *dptr, *n1;
+ struct schedstat_cpu *cptr, *n2;
+
+ list_for_each_entry_safe(cptr, n2, head, cpu_list) {
+ list_for_each_entry_safe(dptr, n1, &cptr->domain_head, domain_list) {
+ list_del_init(&dptr->domain_list);
+ free(dptr);
+ }
+ list_del_init(&cptr->cpu_list);
+ free(cptr);
+ }
+}
+
+static int perf_sched__schedstat_report(struct perf_sched *sched)
+{
+ struct cpu_domain_map **cd_map;
+ struct perf_session *session;
+ struct target target = {};
+ struct perf_data data = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ };
+ int err = 0;
+
+ sched->tool.schedstat_cpu = perf_sched__process_schedstat;
+ sched->tool.schedstat_domain = perf_sched__process_schedstat;
+
+ session = perf_session__new(&data, &sched->tool);
+ if (IS_ERR(session)) {
+ pr_err("Perf session creation failed.\n");
+ return PTR_ERR(session);
+ }
+
+ if (cpu_list)
+ target.cpu_list = cpu_list;
+ else
+ target.system_wide = true;
+
+ err = evlist__create_maps(session->evlist, &target);
+ if (err < 0)
+ goto out;
+
+ user_requested_cpus = session->evlist->core.user_requested_cpus;
+
+ err = perf_session__process_events(session);
+
+ if (!err) {
+ setup_pager();
+
+ if (list_empty(&cpu_head)) {
+ pr_err("Data is not available\n");
+ err = -1;
+ goto out;
+ }
+
+ cd_map = session->header.env.cpu_domain;
+ err = show_schedstat_data(&cpu_head, cd_map);
+ }
+
+out:
+ free_schedstat(&cpu_head);
+ perf_session__delete(session);
+ return err;
+}
+
static bool schedstat_events_exposed(void)
{
/*
@@ -4117,9 +4616,12 @@ int cmd_sched(int argc, const char **argv)
OPT_PARENT(sched_options)
};
const struct option stats_options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "`stats report` with input filename"),
OPT_STRING('o', "output", &output_name, "file",
"`stats record` with output filename"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_BOOLEAN('v', "verbose", &verbose_field, "Show explanation for fields in the report"),
OPT_END()
};
@@ -4244,7 +4746,7 @@ int cmd_sched(int argc, const char **argv)
if (!ret)
ret = perf_sched__timehist(&sched);
} else if (!strcmp(argv[0], "stats")) {
- const char *const stats_subcommands[] = {"record", NULL};
+ const char *const stats_subcommands[] = {"record", "report", NULL};
argc = parse_options_subcommand(argc, argv, stats_options,
stats_subcommands,
@@ -4256,6 +4758,11 @@ int cmd_sched(int argc, const char **argv)
argc = parse_options(argc, argv, stats_options,
stats_usage, 0);
return perf_sched__schedstat_record(&sched, argc, argv);
+ } else if (argv[0] && !strcmp(argv[0], "report")) {
+ if (argc)
+ argc = parse_options(argc, argv, stats_options,
+ stats_usage, 0);
+ return perf_sched__schedstat_report(&sched);
}
usage_with_options(stats_usage, stats_options);
} else {
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 08/11] perf sched stats: Add support for live mode
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (6 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 07/11] perf sched stats: Add support for report subcommand Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 09/11] perf sched stats: Add support for diff subcommand Swapnil Sapkal
` (2 subsequent siblings)
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das, James Clark
The live mode works similarly to a plain `perf stat` command: it profiles
the target and prints results on the terminal as soon as the target
finishes.
Example usage:
# perf sched stats -- sleep 10
Co-developed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Tested-by: James Clark <james.clark@linaro.org>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/perf/builtin-sched.c | 99 +++++++++++++++++++++++++++++++++++++-
| 6 +--
| 5 ++
3 files changed, 106 insertions(+), 4 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index e23018798f5b..ce04349cc4ff 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -4439,6 +4439,103 @@ static int perf_sched__schedstat_report(struct perf_sched *sched)
return err;
}
+static int process_synthesized_event_live(const struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return perf_sched__process_schedstat(NULL, event);
+}
+
+static int perf_sched__schedstat_live(struct perf_sched *sched,
+ int argc, const char **argv)
+{
+ struct cpu_domain_map **cd_map = NULL;
+ struct target target = {};
+ u32 __maybe_unused md;
+ struct evlist *evlist;
+ u32 nr = 0, sv;
+ int reset = 0;
+ int err = 0;
+
+ signal(SIGINT, sighandler);
+ signal(SIGCHLD, sighandler);
+ signal(SIGTERM, sighandler);
+
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ /*
+ * `perf sched schedstat` does not support workload profiling (-p pid)
+ * since /proc/schedstat file contains cpu specific data only. Hence, a
+ * profile target is either set of cpus or systemwide, never a process.
+ * Note that, although `-- <workload>` is supported, profile data are
+ * still cpu/systemwide.
+ */
+ if (cpu_list)
+ target.cpu_list = cpu_list;
+ else
+ target.system_wide = true;
+
+ if (argc) {
+ err = evlist__prepare_workload(evlist, &target, argv, false, NULL);
+ if (err)
+ goto out;
+ }
+
+ err = evlist__create_maps(evlist, &target);
+ if (err < 0)
+ goto out;
+
+ user_requested_cpus = evlist->core.user_requested_cpus;
+
+ err = perf_event__synthesize_schedstat(&(sched->tool),
+ process_synthesized_event_live,
+ user_requested_cpus);
+ if (err < 0)
+ goto out;
+
+ err = enable_sched_schedstats(&reset);
+ if (err < 0)
+ goto out;
+
+ if (argc)
+ evlist__start_workload(evlist);
+
+ /* wait for signal */
+ pause();
+
+ if (reset) {
+ err = disable_sched_schedstat();
+ if (err < 0)
+ goto out;
+ }
+
+ err = perf_event__synthesize_schedstat(&(sched->tool),
+ process_synthesized_event_live,
+ user_requested_cpus);
+ if (err)
+ goto out;
+
+ setup_pager();
+
+ if (list_empty(&cpu_head)) {
+ pr_err("Data is not available\n");
+ err = -1;
+ goto out;
+ }
+
+ nr = cpu__max_present_cpu().cpu;
+ cd_map = build_cpu_domain_map(&sv, &md, nr);
+ show_schedstat_data(&cpu_head, cd_map);
+out:
+ free_cpu_domain_info(cd_map, sv, nr);
+ free_schedstat(&cpu_head);
+ evlist__delete(evlist);
+ return err;
+}
+
static bool schedstat_events_exposed(void)
{
/*
@@ -4764,7 +4861,7 @@ int cmd_sched(int argc, const char **argv)
stats_usage, 0);
return perf_sched__schedstat_report(&sched);
}
- usage_with_options(stats_usage, stats_options);
+ return perf_sched__schedstat_live(&sched, argc, argv);
} else {
usage_with_options(sched_usage, sched_options);
}
--git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7ff7434bac2c..e8f4d00b5261 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1621,7 +1621,7 @@ static int write_pmu_caps(struct feat_fd *ff,
return 0;
}
-static void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr)
+void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr)
{
for (u32 i = 0; i < nr; i++) {
if (cd_map[i]->domains) {
@@ -1641,8 +1641,8 @@ static void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_v
free(cd_map);
}
-static struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
- u32 nr)
+struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
+ u32 nr)
{
struct domain_info *domain_info;
struct cpu_domain_map **cd_map;
--git a/tools/perf/util/header.h b/tools/perf/util/header.h
index edcb95e0dc49..d67d26dad88e 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -209,4 +209,9 @@ char *get_cpuid_str(struct perf_cpu cpu);
char *get_cpuid_allow_env_override(struct perf_cpu cpu);
int strcmp_cpuid_str(const char *s1, const char *s2);
+
+struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains,
+ u32 nr);
+
+void free_cpu_domain_info(struct cpu_domain_map **cd_map, u32 schedstat_version, u32 nr);
#endif /* __PERF_HEADER_H */
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 09/11] perf sched stats: Add support for diff subcommand
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (7 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 08/11] perf sched stats: Add support for live mode Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 10/11] perf sched stats: Add basic perf sched stats test Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 11/11] perf sched stats: Add details in man page Swapnil Sapkal
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
`perf sched stats diff` subcommand will take two perf.data files as an
input and it will print the diff between the two perf.data files. The
default inputs to this subcommand are perf.data.old and perf.data.
Example usage:
# perf sched stats diff sample1.data sample2.data
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/perf/builtin-sched.c | 315 ++++++++++++++++++++++++++++++-------
1 file changed, 258 insertions(+), 57 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index ce04349cc4ff..ebce69180330 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -3996,29 +3996,46 @@ static void store_schedstat_domain_diff(struct schedstat_domain *after_workload)
#undef DOMAIN_FIELD
}
-static inline void print_cpu_stats(struct perf_record_schedstat_cpu *cs)
+#define PCT_CHNG(_x, _y) ((_x) ? ((double)((double)(_y) - (_x)) / (_x)) * 100 : 0.0)
+static inline void print_cpu_stats(struct perf_record_schedstat_cpu *cs1,
+ struct perf_record_schedstat_cpu *cs2)
{
- printf("%-65s %12s %12s\n", "DESC", "COUNT", "PCT_CHANGE");
- printf("%.*s\n", 100, graph_dotted_line);
+ printf("%-65s ", "DESC");
+ if (!cs2)
+ printf("%12s %12s", "COUNT", "PCT_CHANGE");
+ else
+ printf("%12s %11s %12s %14s %10s", "COUNT1", "COUNT2", "PCT_CHANGE",
+ "PCT_CHANGE1", "PCT_CHANGE2");
+
+ printf("\n");
+ print_separator(100, "", 0);
#define CALC_PCT(_x, _y) ((_y) ? ((double)(_x) / (_y)) * 100 : 0.0)
-#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
- do { \
- printf("%-65s: " _format, verbose_field ? _desc : #_name, \
- cs->_ver._name); \
- if (_is_pct) { \
- printf(" ( %8.2lf%% )", \
- CALC_PCT(cs->_ver._name, cs->_ver._pct_of)); \
- } \
- printf("\n"); \
+#define CPU_FIELD(_type, _name, _desc, _format, _is_pct, _pct_of, _ver) \
+ do { \
+ printf("%-65s: " _format, verbose_field ? _desc : #_name, \
+ cs1->_ver._name); \
+ if (!cs2) { \
+ if (_is_pct) \
+ printf(" ( %8.2lf%% )", \
+ CALC_PCT(cs1->_ver._name, cs1->_ver._pct_of)); \
+ } else { \
+ printf("," _format " | %8.2lf%% |", cs2->_ver._name, \
+ PCT_CHNG(cs1->_ver._name, cs2->_ver._name)); \
+ if (_is_pct) \
+ printf(" ( %8.2lf%%, %8.2lf%% )", \
+ CALC_PCT(cs1->_ver._name, cs1->_ver._pct_of), \
+ CALC_PCT(cs2->_ver._name, cs2->_ver._pct_of)); \
+ } \
+ printf("\n"); \
} while (0)
- if (cs->version == 15) {
+ if (cs1->version == 15) {
#include <perf/schedstat-v15.h>
- } else if (cs->version == 16) {
+ } else if (cs1->version == 16) {
#include <perf/schedstat-v16.h>
- } else if (cs->version == 17) {
+ } else if (cs1->version == 17) {
#include <perf/schedstat-v17.h>
}
@@ -4026,10 +4043,17 @@ static inline void print_cpu_stats(struct perf_record_schedstat_cpu *cs)
#undef CALC_PCT
}
-static inline void print_domain_stats(struct perf_record_schedstat_domain *ds,
- __u64 jiffies)
+static inline void print_domain_stats(struct perf_record_schedstat_domain *ds1,
+ struct perf_record_schedstat_domain *ds2,
+ __u64 jiffies1, __u64 jiffies2)
{
- printf("%-65s %12s %14s\n", "DESC", "COUNT", "AVG_JIFFIES");
+ printf("%-65s ", "DESC");
+ if (!ds2)
+ printf("%12s %14s", "COUNT", "AVG_JIFFIES");
+ else
+ printf("%12s %11s %12s %16s %12s", "COUNT1", "COUNT2", "PCT_CHANGE",
+ "AVG_JIFFIES1", "AVG_JIFFIES2");
+ printf("\n");
#define DOMAIN_CATEGORY(_desc) \
do { \
@@ -4044,28 +4068,54 @@ static inline void print_domain_stats(struct perf_record_schedstat_domain *ds,
#define DOMAIN_FIELD(_type, _name, _desc, _format, _is_jiffies, _ver) \
do { \
printf("%-65s: " _format, verbose_field ? _desc : #_name, \
- ds->_ver._name); \
- if (_is_jiffies) { \
- printf(" $ %11.2Lf $", \
- CALC_AVG(jiffies, ds->_ver._name)); \
+ ds1->_ver._name); \
+ if (!ds2) { \
+ if (_is_jiffies) \
+ printf(" $ %11.2Lf $", \
+ CALC_AVG(jiffies1, ds1->_ver._name)); \
+ } else { \
+ printf("," _format " | %8.2lf%% |", ds2->_ver._name, \
+ PCT_CHNG(ds1->_ver._name, ds2->_ver._name)); \
+ if (_is_jiffies) \
+ printf(" $ %11.2Lf, %11.2Lf $", \
+ CALC_AVG(jiffies1, ds1->_ver._name), \
+ CALC_AVG(jiffies2, ds2->_ver._name)); \
} \
printf("\n"); \
} while (0)
#define DERIVED_CNT_FIELD(_name, _desc, _format, _x, _y, _z, _ver) \
- printf("*%-64s: " _format "\n", verbose_field ? _desc : #_name, \
- (ds->_ver._x) - (ds->_ver._y) - (ds->_ver._z))
+ do { \
+ __u32 t1 = ds1->_ver._x - ds1->_ver._y - ds1->_ver._z; \
+ printf("*%-64s: " _format, verbose_field ? _desc : #_name, t1); \
+ if (ds2) { \
+ __u32 t2 = ds2->_ver._x - ds2->_ver._y - ds2->_ver._z; \
+ printf("," _format " | %8.2lf%% |", t2, \
+ PCT_CHNG(t1, t2)); \
+ } \
+ printf("\n"); \
+ } while (0)
#define DERIVED_AVG_FIELD(_name, _desc, _format, _x, _y, _z, _w, _ver) \
- printf("*%-64s: " _format "\n", verbose_field ? _desc : #_name, \
- CALC_AVG(ds->_ver._w, \
- ((ds->_ver._x) - (ds->_ver._y) - (ds->_ver._z))))
+ do { \
+ __u32 t1 = ds1->_ver._x - ds1->_ver._y - ds1->_ver._z; \
+ printf("*%-64s: " _format, verbose_field ? _desc : #_name, \
+ CALC_AVG(ds1->_ver._w, t1)); \
+ if (ds2) { \
+ __u32 t2 = ds2->_ver._x - ds2->_ver._y - ds2->_ver._z; \
+ printf("," _format " | %8.2Lf%% |", \
+ CALC_AVG(ds2->_ver._w, t2), \
+ PCT_CHNG(CALC_AVG(ds1->_ver._w, t1), \
+ CALC_AVG(ds2->_ver._w, t2))); \
+ } \
+ printf("\n"); \
+ } while (0)
- if (ds->version == 15) {
+ if (ds1->version == 15) {
#include <perf/schedstat-v15.h>
- } else if (ds->version == 16) {
+ } else if (ds1->version == 16) {
#include <perf/schedstat-v16.h>
- } else if (ds->version == 17) {
+ } else if (ds1->version == 17) {
#include <perf/schedstat-v17.h>
}
@@ -4075,6 +4125,7 @@ static inline void print_domain_stats(struct perf_record_schedstat_domain *ds,
#undef CALC_AVG
#undef DOMAIN_CATEGORY
}
+#undef PCT_CHNG
static void summarize_schedstat_cpu(struct schedstat_cpu *summary_cpu,
struct schedstat_cpu *cptr,
@@ -4219,13 +4270,16 @@ static void print_field_description(struct schedstat_cpu *cptr)
#undef DOMAIN_FIELD
}
-static int show_schedstat_data(struct list_head *head, struct cpu_domain_map **cd_map)
+static int show_schedstat_data(struct list_head *head1, struct cpu_domain_map **cd_map1,
+ struct list_head *head2, struct cpu_domain_map **cd_map2,
+ bool summary_only)
{
- struct schedstat_cpu *cptr = list_first_entry(head, struct schedstat_cpu, cpu_list);
- __u64 jiffies = cptr->cpu_data->timestamp;
- struct perf_record_schedstat_domain *ds;
- struct perf_record_schedstat_cpu *cs;
- struct schedstat_domain *dptr;
+ struct schedstat_cpu *cptr1 = list_first_entry(head1, struct schedstat_cpu, cpu_list);
+ struct perf_record_schedstat_domain *ds1 = NULL, *ds2 = NULL;
+ struct perf_record_schedstat_cpu *cs1 = NULL, *cs2 = NULL;
+ struct schedstat_domain *dptr1 = NULL, *dptr2 = NULL;
+ struct schedstat_cpu *cptr2 = NULL;
+ __u64 jiffies1 = 0, jiffies2 = 0;
bool is_summary = true;
int ret = 0;
@@ -4239,53 +4293,103 @@ static int show_schedstat_data(struct list_head *head, struct cpu_domain_map **c
if (!verbose_field) {
print_separator(100, "", 0);
- print_field_description(cptr);
+ print_field_description(cptr1);
}
print_separator(100, "", 0);
printf("\n");
- printf("%-65s: %11llu\n", "Time elapsed (in jiffies)", jiffies);
+ printf("%-65s: ", "Time elapsed (in jiffies)");
+ jiffies1 = cptr1->cpu_data->timestamp;
+ printf("%11llu", jiffies1);
+ if (head2) {
+ cptr2 = list_first_entry(head2, struct schedstat_cpu, cpu_list);
+ jiffies2 = cptr2->cpu_data->timestamp;
+ printf(",%11llu", jiffies2);
+ }
+ printf("\n");
+
print_separator(100, "", 0);
- ret = get_all_cpu_stats(head);
+ ret = get_all_cpu_stats(head1);
+ if (cptr2)
+ ret = get_all_cpu_stats(head2);
+
+ list_for_each_entry(cptr1, head1, cpu_list) {
+ struct cpu_domain_map *cd_info1 = NULL, *cd_info2 = NULL;
+
+ cs1 = cptr1->cpu_data;
+ cd_info1 = cd_map1[cs1->cpu];
+ if (cptr2) {
+ cs2 = cptr2->cpu_data;
+ cd_info2 = cd_map2[cs2->cpu];
+ dptr2 = list_first_entry(&cptr2->domain_head, struct schedstat_domain,
+ domain_list);
+ }
+
+ if (cs2 && cs1->cpu != cs2->cpu) {
+ pr_err("Failed because matching cpus not found for diff\n");
+ return -1;
+ }
+
+ if (cd_info2 && cd_info1->nr_domains != cd_info2->nr_domains) {
+ pr_err("Failed because nr_domains is not same for cpus\n");
+ return -1;
+ }
- list_for_each_entry(cptr, head, cpu_list) {
- cs = cptr->cpu_data;
printf("\n");
print_separator(100, "", 0);
if (is_summary)
printf("CPU <ALL CPUS SUMMARY>\n");
else
- printf("CPU %d\n", cs->cpu);
+ printf("CPU %d\n", cs1->cpu);
print_separator(100, "", 0);
- print_cpu_stats(cs);
+ print_cpu_stats(cs1, cs2);
print_separator(100, "", 0);
- list_for_each_entry(dptr, &cptr->domain_head, domain_list) {
- struct domain_info *dinfo;
+ list_for_each_entry(dptr1, &cptr1->domain_head, domain_list) {
+ struct domain_info *dinfo1 = NULL, *dinfo2 = NULL;
+
+ ds1 = dptr1->domain_data;
+ dinfo1 = cd_info1->domains[ds1->domain];
+ if (dptr2) {
+ ds2 = dptr2->domain_data;
+ dinfo2 = cd_info2->domains[ds2->domain];
+ }
+
+ if (dinfo2 && dinfo1->domain != dinfo2->domain) {
+ pr_err("Failed because matching domain not found for diff\n");
+ return -1;
+ }
- ds = dptr->domain_data;
- dinfo = cd_map[ds->cpu]->domains[ds->domain];
if (is_summary)
- if (dinfo->dname)
- printf("CPU <ALL CPUS SUMMARY>, DOMAIN %s\n", dinfo->dname);
+ if (dinfo1->dname)
+ printf("CPU <ALL CPUS SUMMARY>, DOMAIN %s\n", dinfo1->dname);
else
- printf("CPU <ALL CPUS SUMMARY>, DOMAIN %d\n", dinfo->domain);
+ printf("CPU <ALL CPUS SUMMARY>, DOMAIN %d\n", dinfo1->domain);
else {
- if (dinfo->dname)
- printf("CPU %d, DOMAIN %s CPUS ", cs->cpu, dinfo->dname);
+ if (dinfo1->dname)
+ printf("CPU %d, DOMAIN %s CPUS ", cs1->cpu, dinfo1->dname);
else
- printf("CPU %d, DOMAIN %d CPUS ", cs->cpu, dinfo->domain);
+ printf("CPU %d, DOMAIN %d CPUS ", cs1->cpu, dinfo1->domain);
- printf("%s\n", dinfo->cpulist);
+ printf("%s\n", dinfo1->cpulist);
}
print_separator(100, "", 0);
- print_domain_stats(ds, jiffies);
+ print_domain_stats(ds1, ds2, jiffies1, jiffies2);
print_separator(100, "", 0);
+
+ if (dptr2)
+ dptr2 = list_next_entry(dptr2, domain_list);
}
+ if (summary_only)
+ break;
+
+ if (cptr2)
+ cptr2 = list_next_entry(cptr2, cpu_list);
+
is_summary = false;
}
return ret;
@@ -4430,7 +4534,7 @@ static int perf_sched__schedstat_report(struct perf_sched *sched)
}
cd_map = session->header.env.cpu_domain;
- err = show_schedstat_data(&cpu_head, cd_map);
+ err = show_schedstat_data(&cpu_head, cd_map, NULL, NULL, false);
}
out:
@@ -4439,6 +4543,98 @@ static int perf_sched__schedstat_report(struct perf_sched *sched)
return err;
}
+static int perf_sched__schedstat_diff(struct perf_sched *sched,
+ int argc, const char **argv)
+{
+ struct list_head *cpu_head_ses0 = NULL, *cpu_head_ses1 = NULL;
+ struct cpu_domain_map **cd_map0 = NULL, **cd_map1 = NULL;
+ struct perf_session *session[2];
+ struct perf_data data[2];
+ int ret = 0, err = 0;
+ static const char *defaults[] = {
+ "perf.data.old",
+ "perf.data",
+ };
+
+ if (argc) {
+ if (argc == 1)
+ defaults[1] = argv[0];
+ else if (argc == 2) {
+ defaults[0] = argv[0];
+ defaults[1] = argv[1];
+ } else {
+ pr_err("perf sched stats diff is not supported with more than 2 files.\n");
+ goto out_ret;
+ }
+ }
+
+ sched->tool.schedstat_cpu = perf_sched__process_schedstat;
+ sched->tool.schedstat_domain = perf_sched__process_schedstat;
+
+ data[0].path = defaults[0];
+ data[0].mode = PERF_DATA_MODE_READ;
+ session[0] = perf_session__new(&data[0], &sched->tool);
+ if (IS_ERR(session[0])) {
+ ret = PTR_ERR(session[0]);
+ pr_err("Failed to open %s\n", data[0].path);
+ goto out_delete_ses0;
+ }
+
+ err = perf_session__process_events(session[0]);
+ if (err)
+ goto out_delete_ses0;
+
+ cd_map0 = session[0]->header.env.cpu_domain;
+ cpu_head_ses0 = &cpu_head;
+ after_workload_flag = false;
+ INIT_LIST_HEAD(&cpu_head);
+
+ data[1].path = defaults[1];
+ data[1].mode = PERF_DATA_MODE_READ;
+ session[1] = perf_session__new(&data[1], &sched->tool);
+ if (IS_ERR(session[1])) {
+ ret = PTR_ERR(session[1]);
+ pr_err("Failed to open %s\n", data[1].path);
+ goto out_delete_ses1;
+ }
+
+ err = perf_session__process_events(session[1]);
+ if (err)
+ goto out_delete_ses1;
+
+ cd_map1 = session[1]->header.env.cpu_domain;
+ cpu_head_ses1 = &cpu_head;
+ after_workload_flag = false;
+ setup_pager();
+
+ if (list_empty(cpu_head_ses1)) {
+ pr_err("Data is not available\n");
+ ret = -1;
+ goto out_delete_ses1;
+ }
+
+ if (list_empty(cpu_head_ses0)) {
+ pr_err("Data is not available\n");
+ ret = -1;
+ goto out_delete_ses0;
+ }
+
+ show_schedstat_data(cpu_head_ses0, cd_map0, cpu_head_ses1, cd_map1, true);
+
+out_delete_ses1:
+ free_schedstat(cpu_head_ses1);
+ if (!IS_ERR(session[1]))
+ perf_session__delete(session[1]);
+
+out_delete_ses0:
+ free_schedstat(cpu_head_ses0);
+ if (!IS_ERR(session[0]))
+ perf_session__delete(session[0]);
+
+out_ret:
+ return ret;
+}
+
static int process_synthesized_event_live(const struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -4528,7 +4724,7 @@ static int perf_sched__schedstat_live(struct perf_sched *sched,
nr = cpu__max_present_cpu().cpu;
cd_map = build_cpu_domain_map(&sv, &md, nr);
- show_schedstat_data(&cpu_head, cd_map);
+ show_schedstat_data(&cpu_head, cd_map, NULL, NULL, false);
out:
free_cpu_domain_info(cd_map, sv, nr);
free_schedstat(&cpu_head);
@@ -4860,6 +5056,11 @@ int cmd_sched(int argc, const char **argv)
argc = parse_options(argc, argv, stats_options,
stats_usage, 0);
return perf_sched__schedstat_report(&sched);
+ } else if (argv[0] && !strcmp(argv[0], "diff")) {
+ if (argc)
+ argc = parse_options(argc, argv, stats_options,
+ stats_usage, 0);
+ return perf_sched__schedstat_diff(&sched, argc, argv);
}
return perf_sched__schedstat_live(&sched, argc, argv);
} else {
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 10/11] perf sched stats: Add basic perf sched stats test
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (8 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 09/11] perf sched stats: Add support for diff subcommand Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
2025-09-09 11:42 ` [PATCH RESEND v4 11/11] perf sched stats: Add details in man page Swapnil Sapkal
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
Add basic test for perf sched stats {record|report|diff} subcommand.
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/perf/tests/shell/perf_sched_stats.sh | 64 ++++++++++++++++++++++
1 file changed, 64 insertions(+)
create mode 100755 tools/perf/tests/shell/perf_sched_stats.sh
diff --git a/tools/perf/tests/shell/perf_sched_stats.sh b/tools/perf/tests/shell/perf_sched_stats.sh
new file mode 100755
index 000000000000..2b1410b050d0
--- /dev/null
+++ b/tools/perf/tests/shell/perf_sched_stats.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+# perf sched stats tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+test_perf_sched_stats_record() {
+ echo "Basic perf sched stats record test"
+ if ! perf sched stats record true 2>&1 | \
+ grep -E -q "[ perf sched stats: Wrote samples to perf.data ]"
+ then
+ echo "Basic perf sched stats record test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic perf sched stats record test [Success]"
+}
+
+test_perf_sched_stats_report() {
+ echo "Basic perf sched stats report test"
+ perf sched stats record true > /dev/null
+ if ! perf sched stats report 2>&1 | grep -E -q "Description"
+ then
+ echo "Basic perf sched stats report test [Failed]"
+ err=1
+ rm perf.data
+ return
+ fi
+ rm perf.data
+ echo "Basic perf sched stats report test [Success]"
+}
+
+test_perf_sched_stats_live() {
+ echo "Basic perf sched stats live mode test"
+ if ! perf sched stats true 2>&1 | grep -E -q "Description"
+ then
+ echo "Basic perf sched stats live mode test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic perf sched stats live mode test [Success]"
+}
+
+test_perf_sched_stats_diff() {
+ echo "Basic perf sched stats diff test"
+ perf sched stats record true > /dev/null
+ perf sched stats record true > /dev/null
+ if ! perf sched stats diff > /dev/null
+ then
+ echo "Basic perf sched stats diff test [Failed]"
+ err=1
+ rm perf.data.old perf.data
+ return
+ fi
+ rm perf.data.old perf.data
+ echo "Basic perf sched stats diff test [Success]"
+}
+
+test_perf_sched_stats_record
+test_perf_sched_stats_report
+test_perf_sched_stats_live
+test_perf_sched_stats_diff
+exit $err
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread* [PATCH RESEND v4 11/11] perf sched stats: Add details in man page
2025-09-09 11:42 [PATCH RESEND v4 00/11] perf sched: Introduce stats tool Swapnil Sapkal
` (9 preceding siblings ...)
2025-09-09 11:42 ` [PATCH RESEND v4 10/11] perf sched stats: Add basic perf sched stats test Swapnil Sapkal
@ 2025-09-09 11:42 ` Swapnil Sapkal
10 siblings, 0 replies; 13+ messages in thread
From: Swapnil Sapkal @ 2025-09-09 11:42 UTC (permalink / raw)
To: peterz, mingo, acme, namhyung, irogers, james.clark
Cc: ravi.bangoria, swapnil.sapkal, yu.c.chen, mark.rutland,
alexander.shishkin, jolsa, rostedt, vincent.guittot,
adrian.hunter, kan.liang, gautham.shenoy, kprateek.nayak,
juri.lelli, yangjihong, void, tj, sshegde, ctshao, quic_zhonhan,
thomas.falcon, blakejones, ashelat, leo.yan, dvyukov, ak,
yujie.liu, graham.woodward, ben.gainey, vineethr, tim.c.chen,
linux, linux-kernel, linux-perf-users, santosh.shukla,
sandipan.das
Document perf sched stats purpose, usage examples and guide on
how to interpret the report data in the perf-sched man page.
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
---
tools/perf/Documentation/perf-sched.txt | 261 +++++++++++++++++++++++-
1 file changed, 260 insertions(+), 1 deletion(-)
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 6dbbddb6464d..5bfb7bb6c633 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -8,7 +8,7 @@ perf-sched - Tool to trace/measure scheduler properties (latencies)
SYNOPSIS
--------
[verse]
-'perf sched' {record|latency|map|replay|script|timehist}
+'perf sched' {record|latency|map|replay|script|timehist|stats}
DESCRIPTION
-----------
@@ -80,8 +80,267 @@ There are several variants of 'perf sched':
Times are in msec.usec.
+ 'perf sched stats {record | report | diff} <command>' to capture schedstat
+ counters, report them, and show the difference between two reports,
+ respectively. The schedstat counters are present in the Linux kernel and are
+ exposed through the file ``/proc/schedstat``. These counters are enabled or
+ disabled via the sysctl governed by the file ``/proc/sys/kernel/sched_schedstats``.
+ These counters account for many scheduler events such as ``schedule()`` calls,
+ load-balancing events, and ``try_to_wake_up()`` calls, among others. This is
+ useful in understanding the scheduler behavior for the workload.
+
+ Note: The tool will not give correct results if CPUs are topologically
+ reordered, or go online/offline, between captures of `/proc/schedstat` snapshots.
+
+ Example usage:
+ perf sched stats record -- sleep 1
+ perf sched stats report
+ perf sched stats diff
+
+ A detailed description of the schedstats can be found in the Kernel Documentation:
+ https://www.kernel.org/doc/html/latest/scheduler/sched-stats.html
+
+ The result can be interpreted as follows:
+
+ The `perf sched stats report` starts with description of the columns present in
+ the report. These column names are given before cpu and domain stats to improve
+ the readability of the report.
+
+ ----------------------------------------------------------------------------------------------------
+ DESC -> Description of the field
+ COUNT -> Value of the field
+ PCT_CHANGE -> Percent change with corresponding base value
+ AVG_JIFFIES -> Avg time in jiffies between two consecutive occurrence of event
+ ----------------------------------------------------------------------------------------------------
+
+ Next is the total profiling time in terms of jiffies:
+
+ ----------------------------------------------------------------------------------------------------
+ Time elapsed (in jiffies) : 24537
+ ----------------------------------------------------------------------------------------------------
+
+ Next is CPU scheduling statistics. These are simple diffs of /proc/schedstat CPU lines
+ along with description. The report also prints % relative to base stat.
+
+ In the example below, schedule() left the CPU0 idle 36.58% of the time. 0.45% of total
+ try_to_wake_up() was to wakeup local CPU. And, the total waittime by tasks on CPU0 is
+ 48.70% of the total runtime by tasks on the same CPU.
+
+ ----------------------------------------------------------------------------------------------------
+ CPU 0
+ ----------------------------------------------------------------------------------------------------
+ DESC COUNT PCT_CHANGE
+ ----------------------------------------------------------------------------------------------------
+ yld_count : 0
+ array_exp : 0
+ sched_count : 402267
+ sched_goidle : 147161 ( 36.58% )
+ ttwu_count : 236309
+ ttwu_local : 1062 ( 0.45% )
+ rq_cpu_time : 7083791148
+ run_delay : 3449973971 ( 48.70% )
+ pcount : 255035
+ ----------------------------------------------------------------------------------------------------
+
+ Next is load balancing statistics. For each of the sched domains
+ (eg: `SMT`, `MC`, `DIE`...), the scheduler computes statistics under
+ the following three categories:
+
+ 1) Idle Load Balance: Load balancing performed on behalf of a long
+ idling CPU by some other CPU.
+ 2) Busy Load Balance: Load balancing performed when the CPU was busy.
+ 3) New Idle Balance : Load balancing performed when a CPU just became
+ idle.
+
+ Under each of these three categories, sched stats report provides
+ different load balancing statistics. Along with direct stats, the
+ report also contains derived metrics prefixed with *. Example:
+
+ ----------------------------------------------------------------------------------------------------
+ CPU 0, DOMAIN SMT CPUS 0,64
+ ----------------------------------------------------------------------------------------------------
+ DESC COUNT AVG_JIFFIES
+ ----------------------------------------- <Category busy> ------------------------------------------
+ busy_lb_count : 136 $ 17.08 $
+ busy_lb_balanced : 131 $ 17.73 $
+ busy_lb_failed : 0 $ 0.00 $
+ busy_lb_imbalance_load : 58
+ busy_lb_imbalance_util : 0
+ busy_lb_imbalance_task : 0
+ busy_lb_imbalance_misfit : 0
+ busy_lb_gained : 7
+ busy_lb_hot_gained : 0
+ busy_lb_nobusyq : 2 $ 1161.50 $
+ busy_lb_nobusyg : 129 $ 18.01 $
+ *busy_lb_success_count : 5
+ *busy_lb_avg_pulled : 1.40
+ ----------------------------------------- <Category idle> ------------------------------------------
+ idle_lb_count : 449 $ 5.17 $
+ idle_lb_balanced : 382 $ 6.08 $
+ idle_lb_failed : 3 $ 774.33 $
+ idle_lb_imbalance_load : 0
+ idle_lb_imbalance_util : 0
+ idle_lb_imbalance_task : 71
+ idle_lb_imbalance_misfit : 0
+ idle_lb_gained : 67
+ idle_lb_hot_gained : 0
+ idle_lb_nobusyq : 0 $ 0.00 $
+ idle_lb_nobusyg : 382 $ 6.08 $
+ *idle_lb_success_count : 64
+ *idle_lb_avg_pulled : 1.05
+ ---------------------------------------- <Category newidle> ----------------------------------------
+ newidle_lb_count : 30471 $ 0.08 $
+ newidle_lb_balanced : 28490 $ 0.08 $
+ newidle_lb_failed : 633 $ 3.67 $
+ newidle_lb_imbalance_load : 0
+ newidle_lb_imbalance_util : 0
+ newidle_lb_imbalance_task : 2040
+ newidle_lb_imbalance_misfit : 0
+ newidle_lb_gained : 1348
+ newidle_lb_hot_gained : 0
+ newidle_lb_nobusyq : 6 $ 387.17 $
+ newidle_lb_nobusyg : 26634 $ 0.09 $
+ *newidle_lb_success_count : 1348
+ *newidle_lb_avg_pulled : 1.00
+ ----------------------------------------------------------------------------------------------------
+
+ Consider following line:
+
+ newidle_lb_balanced : 28490 $ 0.08 $
+
+ While profiling was active, the load balancer found 28490 times that the load
+ needed to be balanced on the newly idle CPU 0. The following value enclosed
+ in $ is the average number of jiffies between two events (28490 / 24537 = 0.08).
+
+ Next are active_load_balance() stats. alb did not trigger while the
+ profiling was active, hence it's all 0s.
+
+ --------------------------------- <Category active_load_balance()> ---------------------------------
+ alb_count : 0
+ alb_failed : 0
+ alb_pushed : 0
+ ----------------------------------------------------------------------------------------------------
+
+ Next are sched_balance_exec() and sched_balance_fork() stats. They are
+ not used but we kept it in RFC just for legacy purpose. Unless opposed,
+ we plan to remove them in next revision.
+
+ Next are wakeup statistics. For every domain, the report also shows
+ task-wakeup statistics. Example:
+
+ ------------------------------------------ <Wakeup Info> -------------------------------------------
+ ttwu_wake_remote : 1590
+ ttwu_move_affine : 84
+ ttwu_move_balance : 0
+ ----------------------------------------------------------------------------------------------------
+
+ Same set of stats are reported for each CPU and each domain level.
+
+ How to interpret the diff
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The `perf sched stats diff` will also start with explaining the columns
+ present in the diff. Then it will show the diff in time in terms of
+ jiffies. The order of the values depends on the order of input data
+ files. It will take `perf.data.old` and `perf.data` respectively as the
+ defaults for comparison. Example:
+
+ ----------------------------------------------------------------------------------------------------
+ Time elapsed (in jiffies) : 2009, 2001
+ ----------------------------------------------------------------------------------------------------
+
+ Below is the sample representing the difference in cpu and domain stats of
+ two runs. Here third column or the values enclosed in `|...|` shows the
+ percent change between the two. The second and fourth columns show the
+ side-by-side representations of the corresponding fields from `perf sched
+ stats report`.
+
+ ----------------------------------------------------------------------------------------------------
+ CPU <ALL CPUS SUMMARY>
+ ----------------------------------------------------------------------------------------------------
+ DESC COUNT1 COUNT2 PCT_CHANG>
+ ----------------------------------------------------------------------------------------------------
+ yld_count : 0, 0 | 0.00>
+ array_exp : 0, 0 | 0.00>
+ sched_count : 528533, 412573 | -21.94>
+ sched_goidle : 193426, 146082 | -24.48>
+ ttwu_count : 313134, 385975 | 23.26>
+ ttwu_local : 1126, 1282 | 13.85>
+ rq_cpu_time : 8257200244, 8301250047 | 0.53>
+ run_delay : 4728347053, 3997100703 | -15.47>
+ pcount : 335031, 266396 | -20.49>
+ ----------------------------------------------------------------------------------------------------
+
+ Below is the sample of domain stats diff:
+
+ ----------------------------------------------------------------------------------------------------
+ CPU <ALL CPUS SUMMARY>, DOMAIN SMT
+ ----------------------------------------------------------------------------------------------------
+ DESC COUNT1 COUNT2 PCT_CHANG>
+ ----------------------------------------- <Category busy> ------------------------------------------
+ busy_lb_count : 122, 80 | -34.43>
+ busy_lb_balanced : 115, 76 | -33.91>
+ busy_lb_failed : 1, 3 | 200.00>
+ busy_lb_imbalance_load : 35, 49 | 40.00>
+ busy_lb_imbalance_util : 0, 0 | 0.00>
+ busy_lb_imbalance_task : 0, 0 | 0.00>
+ busy_lb_imbalance_misfit : 0, 0 | 0.00>
+ busy_lb_gained : 7, 2 | -71.43>
+ busy_lb_hot_gained : 0, 0 | 0.00>
+ busy_lb_nobusyq : 0, 0 | 0.00>
+ busy_lb_nobusyg : 115, 76 | -33.91>
+ *busy_lb_success_count : 6, 1 | -83.33>
+ *busy_lb_avg_pulled : 1.17, 2.00 | 71.43>
+ ----------------------------------------- <Category idle> ------------------------------------------
+ idle_lb_count : 568, 620 | 9.15>
+ idle_lb_balanced : 462, 449 | -2.81>
+ idle_lb_failed : 11, 21 | 90.91>
+ idle_lb_imbalance_load : 0, 0 | 0.00>
+ idle_lb_imbalance_util : 0, 0 | 0.00>
+ idle_lb_imbalance_task : 115, 189 | 64.35>
+ idle_lb_imbalance_misfit : 0, 0 | 0.00>
+ idle_lb_gained : 103, 169 | 64.08>
+ idle_lb_hot_gained : 0, 0 | 0.00>
+ idle_lb_nobusyq : 0, 0 | 0.00>
+ idle_lb_nobusyg : 462, 449 | -2.81>
+ *idle_lb_success_count : 95, 150 | 57.89>
+ *idle_lb_avg_pulled : 1.08, 1.13 | 3.92>
+ ---------------------------------------- <Category newidle> ----------------------------------------
+ newidle_lb_count : 16961, 3155 | -81.40>
+ newidle_lb_balanced : 15646, 2556 | -83.66>
+ newidle_lb_failed : 397, 142 | -64.23>
+ newidle_lb_imbalance_load : 0, 0 | 0.00>
+ newidle_lb_imbalance_util : 0, 0 | 0.00>
+ newidle_lb_imbalance_task : 1376, 655 | -52.40>
+ newidle_lb_imbalance_misfit : 0, 0 | 0.00>
+ newidle_lb_gained : 917, 457 | -50.16>
+ newidle_lb_hot_gained : 0, 0 | 0.00>
+ newidle_lb_nobusyq : 3, 1 | -66.67>
+ newidle_lb_nobusyg : 14480, 2103 | -85.48>
+ *newidle_lb_success_count : 918, 457 | -50.22>
+ *newidle_lb_avg_pulled : 1.00, 1.00 | 0.11>
+ --------------------------------- <Category active_load_balance()> ---------------------------------
+ alb_count : 0, 1 | 0.00>
+ alb_failed : 0, 0 | 0.00>
+ alb_pushed : 0, 1 | 0.00>
+ --------------------------------- <Category sched_balance_exec()> ----------------------------------
+ sbe_count : 0, 0 | 0.00>
+ sbe_balanced : 0, 0 | 0.00>
+ sbe_pushed : 0, 0 | 0.00>
+ --------------------------------- <Category sched_balance_fork()> ----------------------------------
+ sbf_count : 0, 0 | 0.00>
+ sbf_balanced : 0, 0 | 0.00>
+ sbf_pushed : 0, 0 | 0.00>
+ ------------------------------------------ <Wakeup Info> -------------------------------------------
+ ttwu_wake_remote : 2031, 2914 | 43.48>
+ ttwu_move_affine : 73, 124 | 69.86>
+ ttwu_move_balance : 0, 0 | 0.00>
+ ----------------------------------------------------------------------------------------------------
+
OPTIONS
-------
+Applicable to {record|latency|map|replay|script}
+
-i::
--input=<file>::
Input file name. (default: perf.data unless stdin is a fifo)
--
2.43.0
^ permalink raw reply related [flat|nested] 13+ messages in thread