linux-perf-users.vger.kernel.org archive mirror
From: kajoljain <kjain@linux.ibm.com>
To: kan.liang@linux.intel.com, acme@kernel.org, irogers@google.com,
	peterz@infradead.org, mingo@redhat.com, namhyung@kernel.org,
	jolsa@kernel.org, adrian.hunter@intel.com,
	john.g.garry@oracle.com, will@kernel.org, james.clark@arm.com,
	mike.leach@linaro.org, leo.yan@linaro.org,
	yuhaixin.yhx@linux.alibaba.com, renyu.zj@linux.alibaba.com,
	tmricht@linux.ibm.com, ravi.bangoria@amd.com,
	atrajeev@linux.vnet.ibm.com, linux-kernel@vger.kernel.org,
	linux-perf-users@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH V3 2/7] perf mem: Clean up perf_mem_events__ptr()
Date: Tue, 16 Jan 2024 18:52:50 +0530	[thread overview]
Message-ID: <ec397d6e-c37b-8fe7-2a55-e4fdde75e075@linux.ibm.com> (raw)
In-Reply-To: <20231213195154.1085945-3-kan.liang@linux.intel.com>

Patch looks fine to me. A short sketch of the caller-side flow is
included below for reference.

Reviewed-by: Kajol Jain <kjain@linux.ibm.com>
Tested-by: Kajol Jain <kjain@linux.ibm.com>

Thanks,
Kajol Jain
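
For anyone following the API change, here is my own simplified sketch of
the caller-side flow this patch establishes (mirroring the pattern now
used in builtin-mem.c and builtin-c2c.c; not part of the patch, the
relevant tools/perf headers are assumed, error messages omitted):

	struct perf_pmu *pmu;
	struct perf_mem_event *e;

	/* Pick the first PMU that carries a mem_events table. */
	pmu = perf_mem_events_find_pmu();
	if (!pmu)
		return -1;

	/* Probe sysfs once, for that PMU's mem events only. */
	if (perf_pmu__mem_events_init(pmu))
		return -1;

	/* Accessors take the pmu instead of using per-arch tables. */
	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
	if (e)
		e->record = true;

Configuring a single PMU is enough here because, as the commit message
notes, only Intel hybrid has more than one mem-events PMU and those
PMUs share the same mem_events.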

On 12/14/23 01:21, kan.liang@linux.intel.com wrote:
> From: Kan Liang <kan.liang@linux.intel.com>
> 
> The mem_events can now be retrieved from struct perf_pmu, so the
> arch-specific perf_mem_events__ptr() implementations are no longer
> required. Remove all of them.
>
> Intel hybrid platforms have multiple PMUs that support mem_events, but
> they all share the same mem_events. Other architectures have only one
> such PMU, so it is sufficient to configure the mem_events of a single
> PMU. Add perf_mem_events_find_pmu(), which returns the first PMU that
> supports mem_events.
>
> With this, perf_mem_events__init() no longer needs perf_pmus__scan(),
> which avoids checking sysfs for every PMU on the system.
>
> Make perf_mem_events__record_args() more generic and remove
> perf_mem_events__print_unsupport_hybrid().
>
> Since a pmu is now passed as a parameter, rename perf_mem_events__ptr()
> to perf_pmu__mem_events_ptr(). Several other functions are renamed in
> the same way.
> 
> Reviewed-by: Ian Rogers <irogers@google.com>
> Tested-by: Ravi Bangoria <ravi.bangoria@amd.com>
> Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
> ---
>  tools/perf/arch/arm64/util/mem-events.c |  10 +--
>  tools/perf/arch/x86/util/mem-events.c   |  18 ++---
>  tools/perf/builtin-c2c.c                |  28 +++++--
>  tools/perf/builtin-mem.c                |  28 +++++--
>  tools/perf/util/mem-events.c            | 103 ++++++++++++------------
>  tools/perf/util/mem-events.h            |   9 ++-
>  6 files changed, 104 insertions(+), 92 deletions(-)
> 
> diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
> index edf8207f7812..d3e69a520c2b 100644
> --- a/tools/perf/arch/arm64/util/mem-events.c
> +++ b/tools/perf/arch/arm64/util/mem-events.c
> @@ -13,17 +13,9 @@ struct perf_mem_event perf_mem_events_arm[PERF_MEM_EVENTS__MAX] = {
>  
>  static char mem_ev_name[100];
>  
> -struct perf_mem_event *perf_mem_events__ptr(int i)
> -{
> -	if (i >= PERF_MEM_EVENTS__MAX)
> -		return NULL;
> -
> -	return &perf_mem_events_arm[i];
> -}
> -
>  const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
>  {
> -	struct perf_mem_event *e = perf_mem_events__ptr(i);
> +	struct perf_mem_event *e = &perf_mem_events_arm[i];
>  
>  	if (i >= PERF_MEM_EVENTS__MAX)
>  		return NULL;
> diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
> index 2b81d229982c..5fb41d50118d 100644
> --- a/tools/perf/arch/x86/util/mem-events.c
> +++ b/tools/perf/arch/x86/util/mem-events.c
> @@ -28,17 +28,6 @@ struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
>  	E("mem-ldst",	"ibs_op//",	"ibs_op"),
>  };
>  
> -struct perf_mem_event *perf_mem_events__ptr(int i)
> -{
> -	if (i >= PERF_MEM_EVENTS__MAX)
> -		return NULL;
> -
> -	if (x86__is_amd_cpu())
> -		return &perf_mem_events_amd[i];
> -
> -	return &perf_mem_events_intel[i];
> -}
> -
>  bool is_mem_loads_aux_event(struct evsel *leader)
>  {
>  	struct perf_pmu *pmu = perf_pmus__find("cpu");
> @@ -54,7 +43,12 @@ bool is_mem_loads_aux_event(struct evsel *leader)
>  
>  const char *perf_mem_events__name(int i, const char *pmu_name)
>  {
> -	struct perf_mem_event *e = perf_mem_events__ptr(i);
> +	struct perf_mem_event *e;
> +
> +	if (x86__is_amd_cpu())
> +		e = &perf_mem_events_amd[i];
> +	else
> +		e = &perf_mem_events_intel[i];
>  
>  	if (!e)
>  		return NULL;
> diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
> index f78eea9e2153..838481505e08 100644
> --- a/tools/perf/builtin-c2c.c
> +++ b/tools/perf/builtin-c2c.c
> @@ -3215,12 +3215,19 @@ static int parse_record_events(const struct option *opt,
>  			       const char *str, int unset __maybe_unused)
>  {
>  	bool *event_set = (bool *) opt->value;
> +	struct perf_pmu *pmu;
> +
> +	pmu = perf_mem_events_find_pmu();
> +	if (!pmu) {
> +		pr_err("failed: there is no PMU that supports perf c2c\n");
> +		exit(-1);
> +	}
>  
>  	if (!strcmp(str, "list")) {
> -		perf_mem_events__list();
> +		perf_pmu__mem_events_list(pmu);
>  		exit(0);
>  	}
> -	if (perf_mem_events__parse(str))
> +	if (perf_pmu__mem_events_parse(pmu, str))
>  		exit(-1);
>  
>  	*event_set = true;
> @@ -3245,6 +3252,7 @@ static int perf_c2c__record(int argc, const char **argv)
>  	bool all_user = false, all_kernel = false;
>  	bool event_set = false;
>  	struct perf_mem_event *e;
> +	struct perf_pmu *pmu;
>  	struct option options[] = {
>  	OPT_CALLBACK('e', "event", &event_set, "event",
>  		     "event selector. Use 'perf c2c record -e list' to list available events",
> @@ -3256,7 +3264,13 @@ static int perf_c2c__record(int argc, const char **argv)
>  	OPT_END()
>  	};
>  
> -	if (perf_mem_events__init()) {
> +	pmu = perf_mem_events_find_pmu();
> +	if (!pmu) {
> +		pr_err("failed: no PMU supports the memory events\n");
> +		return -1;
> +	}
> +
> +	if (perf_pmu__mem_events_init(pmu)) {
>  		pr_err("failed: memory events not supported\n");
>  		return -1;
>  	}
> @@ -3280,7 +3294,7 @@ static int perf_c2c__record(int argc, const char **argv)
>  	rec_argv[i++] = "record";
>  
>  	if (!event_set) {
> -		e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
> +		e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
>  		/*
>  		 * The load and store operations are required, use the event
>  		 * PERF_MEM_EVENTS__LOAD_STORE if it is supported.
> @@ -3289,15 +3303,15 @@ static int perf_c2c__record(int argc, const char **argv)
>  			e->record = true;
>  			rec_argv[i++] = "-W";
>  		} else {
> -			e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> +			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
>  			e->record = true;
>  
> -			e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
> +			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
>  			e->record = true;
>  		}
>  	}
>  
> -	e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> +	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
>  	if (e->record)
>  		rec_argv[i++] = "-W";
>  
> diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
> index 51499c20da01..ef64bae77ca7 100644
> --- a/tools/perf/builtin-mem.c
> +++ b/tools/perf/builtin-mem.c
> @@ -43,12 +43,19 @@ static int parse_record_events(const struct option *opt,
>  			       const char *str, int unset __maybe_unused)
>  {
>  	struct perf_mem *mem = *(struct perf_mem **)opt->value;
> +	struct perf_pmu *pmu;
> +
> +	pmu = perf_mem_events_find_pmu();
> +	if (!pmu) {
> +		pr_err("failed: there is no PMU that supports perf mem\n");
> +		exit(-1);
> +	}
>  
>  	if (!strcmp(str, "list")) {
> -		perf_mem_events__list();
> +		perf_pmu__mem_events_list(pmu);
>  		exit(0);
>  	}
> -	if (perf_mem_events__parse(str))
> +	if (perf_pmu__mem_events_parse(pmu, str))
>  		exit(-1);
>  
>  	mem->operation = 0;
> @@ -72,6 +79,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
>  	int ret;
>  	bool all_user = false, all_kernel = false;
>  	struct perf_mem_event *e;
> +	struct perf_pmu *pmu;
>  	struct option options[] = {
>  	OPT_CALLBACK('e', "event", &mem, "event",
>  		     "event selector. use 'perf mem record -e list' to list available events",
> @@ -84,7 +92,13 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
>  	OPT_END()
>  	};
>  
> -	if (perf_mem_events__init()) {
> +	pmu = perf_mem_events_find_pmu();
> +	if (!pmu) {
> +		pr_err("failed: no PMU supports the memory events\n");
> +		return -1;
> +	}
> +
> +	if (perf_pmu__mem_events_init(pmu)) {
>  		pr_err("failed: memory events not supported\n");
>  		return -1;
>  	}
> @@ -113,7 +127,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
>  
>  	rec_argv[i++] = "record";
>  
> -	e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
> +	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
>  
>  	/*
>  	 * The load and store operations are required, use the event
> @@ -126,17 +140,17 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
>  		rec_argv[i++] = "-W";
>  	} else {
>  		if (mem->operation & MEM_OPERATION_LOAD) {
> -			e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> +			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
>  			e->record = true;
>  		}
>  
>  		if (mem->operation & MEM_OPERATION_STORE) {
> -			e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
> +			e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__STORE);
>  			e->record = true;
>  		}
>  	}
>  
> -	e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> +	e = perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD);
>  	if (e->record)
>  		rec_argv[i++] = "-W";
>  
> diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> index 0a8f415f5efe..27a33dc44964 100644
> --- a/tools/perf/util/mem-events.c
> +++ b/tools/perf/util/mem-events.c
> @@ -29,17 +29,42 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
>  static char mem_loads_name[100];
>  static bool mem_loads_name__init;
>  
> -struct perf_mem_event * __weak perf_mem_events__ptr(int i)
> +struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
>  {
> -	if (i >= PERF_MEM_EVENTS__MAX)
> +	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
>  		return NULL;
>  
> -	return &perf_mem_events[i];
> +	return &pmu->mem_events[i];
> +}
> +
> +static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
> +{
> +	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> +		if (pmu->mem_events)
> +			return pmu;
> +	}
> +	return NULL;
> +}
> +
> +struct perf_pmu *perf_mem_events_find_pmu(void)
> +{
> +	/*
> +	 * The current perf mem doesn't support per-PMU configuration.
> +	 * The exact same configuration is applied to all the
> +	 * mem_events supported PMUs.
> +	 * Return the first mem_events supported PMU.
> +	 *
> +	 * Note: The only case that may have multiple PMUs with
> +	 * mem_events support is Intel hybrid, and those PMUs share
> +	 * the exact same mem_events. Configuring only the first
> +	 * PMU is therefore sufficient there as well.
> +	 */
> +	return perf_pmus__scan_mem(NULL);
>  }
>  
>  const char * __weak perf_mem_events__name(int i, const char *pmu_name  __maybe_unused)
>  {
> -	struct perf_mem_event *e = perf_mem_events__ptr(i);
> +	struct perf_mem_event *e = &perf_mem_events[i];
>  
>  	if (!e)
>  		return NULL;
> @@ -61,7 +86,7 @@ __weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
>  	return false;
>  }
>  
> -int perf_mem_events__parse(const char *str)
> +int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
>  {
>  	char *tok, *saveptr = NULL;
>  	bool found = false;
> @@ -79,7 +104,7 @@ int perf_mem_events__parse(const char *str)
>  
>  	while (tok) {
>  		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> -			struct perf_mem_event *e = perf_mem_events__ptr(j);
> +			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
>  
>  			if (!e->tag)
>  				continue;
> @@ -112,7 +137,7 @@ static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
>  	return !stat(path, &st);
>  }
>  
> -int perf_mem_events__init(void)
> +int perf_pmu__mem_events_init(struct perf_pmu *pmu)
>  {
>  	const char *mnt = sysfs__mount();
>  	bool found = false;
> @@ -122,8 +147,7 @@ int perf_mem_events__init(void)
>  		return -ENOENT;
>  
>  	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> -		struct perf_mem_event *e = perf_mem_events__ptr(j);
> -		struct perf_pmu *pmu = NULL;
> +		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
>  
>  		/*
>  		 * If the event entry isn't valid, skip initialization
> @@ -132,29 +156,20 @@ int perf_mem_events__init(void)
>  		if (!e->tag)
>  			continue;
>  
> -		/*
> -		 * Scan all PMUs not just core ones, since perf mem/c2c on
> -		 * platforms like AMD uses IBS OP PMU which is independent
> -		 * of core PMU.
> -		 */
> -		while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> -			e->supported |= perf_mem_event__supported(mnt, pmu, e);
> -			if (e->supported) {
> -				found = true;
> -				break;
> -			}
> -		}
> +		e->supported |= perf_mem_event__supported(mnt, pmu, e);
> +		if (e->supported)
> +			found = true;
>  	}
>  
>  	return found ? 0 : -ENOENT;
>  }
>  
> -void perf_mem_events__list(void)
> +void perf_pmu__mem_events_list(struct perf_pmu *pmu)
>  {
>  	int j;
>  
>  	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> -		struct perf_mem_event *e = perf_mem_events__ptr(j);
> +		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
>  
>  		fprintf(stderr, "%-*s%-*s%s",
>  			e->tag ? 13 : 0,
> @@ -165,50 +180,32 @@ void perf_mem_events__list(void)
>  	}
>  }
>  
> -static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
> -						    int idx)
> -{
> -	const char *mnt = sysfs__mount();
> -	struct perf_pmu *pmu = NULL;
> -
> -	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> -		if (!perf_mem_event__supported(mnt, pmu, e)) {
> -			pr_err("failed: event '%s' not supported\n",
> -			       perf_mem_events__name(idx, pmu->name));
> -		}
> -	}
> -}
> -
>  int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
>  				 char **rec_tmp, int *tmp_nr)
>  {
>  	const char *mnt = sysfs__mount();
> +	struct perf_pmu *pmu = NULL;
>  	int i = *argv_nr, k = 0;
>  	struct perf_mem_event *e;
>  
> -	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> -		e = perf_mem_events__ptr(j);
> -		if (!e->record)
> -			continue;
>  
> -		if (perf_pmus__num_mem_pmus() == 1) {
> -			if (!e->supported) {
> -				pr_err("failed: event '%s' not supported\n",
> -				       perf_mem_events__name(j, NULL));
> -				return -1;
> -			}
> +	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
> +		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> +			e = perf_pmu__mem_events_ptr(pmu, j);
>  
> -			rec_argv[i++] = "-e";
> -			rec_argv[i++] = perf_mem_events__name(j, NULL);
> -		} else {
> -			struct perf_pmu *pmu = NULL;
> +			if (!e->record)
> +				continue;
>  
>  			if (!e->supported) {
> -				perf_mem_events__print_unsupport_hybrid(e, j);
> +				pr_err("failed: event '%s' not supported\n",
> +					perf_mem_events__name(j, pmu->name));
>  				return -1;
>  			}
>  
> -			while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> +			if (perf_pmus__num_mem_pmus() == 1) {
> +				rec_argv[i++] = "-e";
> +				rec_argv[i++] = perf_mem_events__name(j, NULL);
> +			} else {
>  				const char *s = perf_mem_events__name(j, pmu->name);
>  
>  				if (!perf_mem_event__supported(mnt, pmu, e))
> diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
> index 8c5694b2d0b0..0ad301a2e424 100644
> --- a/tools/perf/util/mem-events.h
> +++ b/tools/perf/util/mem-events.h
> @@ -36,14 +36,15 @@ enum {
>  extern unsigned int perf_mem_events__loads_ldlat;
>  extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
>  
> -int perf_mem_events__parse(const char *str);
> -int perf_mem_events__init(void);
> +int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str);
> +int perf_pmu__mem_events_init(struct perf_pmu *pmu);
>  
>  const char *perf_mem_events__name(int i, const char *pmu_name);
> -struct perf_mem_event *perf_mem_events__ptr(int i);
> +struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i);
> +struct perf_pmu *perf_mem_events_find_pmu(void);
>  bool is_mem_loads_aux_event(struct evsel *leader);
>  
> -void perf_mem_events__list(void);
> +void perf_pmu__mem_events_list(struct perf_pmu *pmu);
>  int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
>  				 char **rec_tmp, int *tmp_nr);
>  


Thread overview: 24+ messages
2023-12-13 19:51 [PATCH V3 0/7] Clean up perf mem kan.liang
2023-12-13 19:51 ` [PATCH V3 1/7] perf mem: Add mem_events into the supported perf_pmu kan.liang
2023-12-19  8:48   ` kajoljain
2023-12-13 19:51 ` [PATCH V3 2/7] perf mem: Clean up perf_mem_events__ptr() kan.liang
2024-01-16 13:22   ` kajoljain [this message]
2023-12-13 19:51 ` [PATCH V3 3/7] perf mem: Clean up perf_mem_events__name() kan.liang
2024-01-16 13:58   ` kajoljain
2023-12-13 19:51 ` [PATCH V3 4/7] perf mem: Clean up perf_mem_event__supported() kan.liang
2023-12-13 19:51 ` [PATCH V3 5/7] perf mem: Clean up is_mem_loads_aux_event() kan.liang
2023-12-13 19:51 ` [PATCH V3 6/7] perf mem: Clean up perf_mem_events__record_args() kan.liang
2023-12-13 19:51 ` [PATCH V3 7/7] perf mem: Clean up perf_pmus__num_mem_pmus() kan.liang
2023-12-16  3:29 ` [PATCH V3 0/7] Clean up perf mem Leo Yan
2023-12-19  9:26 ` kajoljain
2023-12-19 14:15   ` Liang, Kan
2024-01-02 20:08     ` Liang, Kan
2024-01-05  6:38       ` kajoljain
2024-01-05 14:38         ` Liang, Kan
2024-01-16 14:05           ` kajoljain
2024-01-16 16:37             ` Liang, Kan
2024-01-23  5:30               ` kajoljain
2024-01-23  5:56                 ` Thomas Richter
2024-01-23 14:36                   ` Liang, Kan
2024-01-07  4:08 ` Leo Yan
2024-01-09 14:01   ` Liang, Kan
