From: Alexey Budankov <alexey.budankov@linux.intel.com>
To: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>, Kan Liang <kan.liang@intel.com>,
Dmitri Prokhorov <Dmitry.Prohorov@intel.com>,
Valery Cherepennikov <valery.cherepennikov@intel.com>,
Mark Rutland <mark.rutland@arm.com>,
Stephane Eranian <eranian@google.com>,
David Carrillo-Cisneros <davidcc@google.com>,
linux-kernel <linux-kernel@vger.kernel.org>
Subject: [PATCH v7 2/2] perf/core: add mux switch to skip to the current CPU's events list on mux interrupt
Date: Fri, 18 Aug 2017 08:22:47 +0300 [thread overview]
Message-ID: <edf9ed7e-a4d0-dbce-53cc-883f9ab40957@linux.intel.com> (raw)
In-Reply-To: <d676ecc5-488a-e82c-b85d-ccc8e0f02c4e@linux.intel.com>
This patch implements a mux switch that triggers skipping to the
current CPU's events list at the multiplexing hrtimer interrupt
handler, as well as adoption of the switch in the existing
implementation.
perf_event_groups_iterate_cpu() API is introduced to implement
iteration through a certain CPU's groups list, skipping groups
allocated for the other CPUs.
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
kernel/events/core.c | 193 ++++++++++++++++++++++++++++++++++++---------------
1 file changed, 137 insertions(+), 56 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 08ccfb2..aeb0f81 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -556,11 +556,11 @@ void perf_sample_event_took(u64 sample_len_ns)
static atomic64_t perf_event_id;
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type);
+ enum event_type_t event_type, int mux);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task);
+ struct task_struct *task, int mux);
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
@@ -702,6 +702,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
struct perf_cpu_context *cpuctx;
struct list_head *list;
unsigned long flags;
+ int mux = 0;
/*
* Disable interrupts and preemption to avoid this CPU's
@@ -717,7 +718,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
- cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+ cpu_ctx_sched_out(cpuctx, EVENT_ALL, mux);
/*
* must not be done before ctxswout due
* to event_filter_match() in event_sched_out()
@@ -736,7 +737,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
*/
cpuctx->cgrp = perf_cgroup_from_task(task,
&cpuctx->ctx);
- cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task, mux);
}
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -1613,8 +1614,16 @@ perf_event_groups_rotate(struct rb_root *groups, int cpu)
*/
#define perf_event_groups_for_each(event, iter, tree, node, list, link) \
for (iter = rb_first(tree); iter; iter = rb_next(iter)) \
- list_for_each_entry(event, &(rb_entry(iter, \
- typeof(*event), node)->list), link)
+ list_for_each_entry(event, &(rb_entry(iter, \
+ typeof(*event), node)->list), link)
+
+/*
+ * Iterate event groups related to specific cpu.
+ */
+#define perf_event_groups_for_each_cpu(event, cpu, tree, list, link) \
+ list = perf_event_groups_get_list(tree, cpu); \
+ if (list) \
+ list_for_each_entry(event, list, link)
/*
* Add a event from the lists for its context.
@@ -2397,36 +2406,38 @@ static void add_event_to_ctx(struct perf_event *event,
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
- enum event_type_t event_type);
+ enum event_type_t event_type, int mux);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task);
+ struct task_struct *task, int mux);
static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
enum event_type_t event_type)
{
+ int mux = 0;
+
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- ctx_sched_out(ctx, cpuctx, event_type);
+ ctx_sched_out(ctx, cpuctx, event_type, mux);
}
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
- struct task_struct *task)
+ struct task_struct *task, int mux)
{
- cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task, mux);
if (ctx)
- ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task, mux);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task, mux);
if (ctx)
- ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task, mux);
}
/*
@@ -2450,6 +2461,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
{
enum event_type_t ctx_event_type = event_type & EVENT_ALL;
bool cpu_event = !!(event_type & EVENT_CPU);
+ int mux = 0;
/*
* If pinned groups are involved, flexible groups also need to be
@@ -2470,11 +2482,11 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
* - otherwise, do nothing more.
*/
if (cpu_event)
- cpu_ctx_sched_out(cpuctx, ctx_event_type);
+ cpu_ctx_sched_out(cpuctx, ctx_event_type, mux);
else if (ctx_event_type & EVENT_PINNED)
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE, mux);
- perf_event_sched_in(cpuctx, task_ctx, current);
+ perf_event_sched_in(cpuctx, task_ctx, current, mux);
perf_pmu_enable(cpuctx->ctx.pmu);
}
@@ -2491,7 +2503,7 @@ static int __perf_install_in_context(void *info)
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
bool reprogram = true;
- int ret = 0;
+ int ret = 0, mux =0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
@@ -2518,7 +2530,7 @@ static int __perf_install_in_context(void *info)
}
if (reprogram) {
- ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME, mux);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx, get_event_type(event));
} else {
@@ -2655,13 +2667,14 @@ static void __perf_event_enable(struct perf_event *event,
{
struct perf_event *leader = event->group_leader;
struct perf_event_context *task_ctx;
+ int mux = 0;
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state <= PERF_EVENT_STATE_ERROR)
return;
if (ctx->is_active)
- ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME, mux);
__perf_event_mark_enabled(event);
@@ -2671,7 +2684,7 @@ static void __perf_event_enable(struct perf_event *event,
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
- ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current, mux);
return;
}
@@ -2680,7 +2693,7 @@ static void __perf_event_enable(struct perf_event *event,
* then don't put it on unless the group is on.
*/
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
- ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current, mux);
return;
}
@@ -2876,11 +2889,13 @@ EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
- enum event_type_t event_type)
+ enum event_type_t event_type, int mux)
{
int is_active = ctx->is_active;
+ struct list_head *group_list;
struct perf_event *event;
struct rb_node *node;
+ int sw = -1, cpu = smp_processor_id();
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events)) {
@@ -2926,17 +2941,47 @@ static void ctx_sched_out(struct perf_event_context *ctx,
perf_pmu_disable(ctx->pmu);
- if (is_active & EVENT_PINNED)
- perf_event_groups_for_each(event, node,
- &ctx->pinned_groups, group_node,
- group_list, group_entry)
- group_sched_out(event, cpuctx, ctx);
+ if (is_active & EVENT_PINNED) {
+ if (mux) {
+ perf_event_groups_for_each_cpu(event, cpu,
+ &ctx->pinned_groups,
+ group_list, group_entry) {
+ group_sched_out(event, cpuctx, ctx);
+ }
+ perf_event_groups_for_each_cpu(event, sw,
+ &ctx->pinned_groups,
+ group_list, group_entry) {
+ group_sched_out(event, cpuctx, ctx);
+ }
+ } else {
+ perf_event_groups_for_each(event, node,
+ &ctx->pinned_groups, group_node,
+ group_list, group_entry) {
+ group_sched_out(event, cpuctx, ctx);
+ }
+ }
+ }
- if (is_active & EVENT_FLEXIBLE)
- perf_event_groups_for_each(event, node,
- &ctx->flexible_groups, group_node,
- group_list, group_entry)
- group_sched_out(event, cpuctx, ctx);
+ if (is_active & EVENT_FLEXIBLE) {
+ if (mux) {
+ perf_event_groups_for_each_cpu(event, cpu,
+ &ctx->flexible_groups,
+ group_list, group_entry) {
+ group_sched_out(event, cpuctx, ctx);
+ }
+ perf_event_groups_for_each_cpu(event, sw,
+ &ctx->flexible_groups,
+ group_list, group_entry) {
+ group_sched_out(event, cpuctx, ctx);
+ }
+ } else {
+ perf_event_groups_for_each(event, node,
+ &ctx->flexible_groups, group_node,
+ group_list, group_entry) {
+ group_sched_out(event, cpuctx, ctx);
+ }
+ }
+ }
perf_pmu_enable(ctx->pmu);
}
@@ -3225,9 +3270,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
* Called with IRQs disabled
*/
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type)
+ enum event_type_t event_type, int mux)
{
- ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
+ ctx_sched_out(&cpuctx->ctx, cpuctx, event_type, mux);
}
static void
@@ -3287,11 +3332,13 @@ static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task)
+ struct task_struct *task, int mux)
{
int is_active = ctx->is_active;
+ struct list_head *group_list;
struct perf_event *event;
struct rb_node *node;
+ int sw = -1, cpu = smp_processor_id();
lockdep_assert_held(&ctx->lock);
@@ -3319,35 +3366,69 @@ ctx_sched_in(struct perf_event_context *ctx,
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (is_active & EVENT_PINNED)
- perf_event_groups_for_each(event, node,
- &ctx->pinned_groups, group_node,
- group_list, group_entry)
- ctx_pinned_sched_in(event, cpuctx, ctx);
+ if (is_active & EVENT_PINNED) {
+ if (mux) {
+ perf_event_groups_for_each_cpu(event, sw,
+ &ctx->pinned_groups,
+ group_list, group_entry) {
+ ctx_pinned_sched_in(event, cpuctx, ctx);
+ }
+ perf_event_groups_for_each_cpu(event, cpu,
+ &ctx->pinned_groups,
+ group_list, group_entry) {
+ ctx_pinned_sched_in(event, cpuctx, ctx);
+ }
+ } else {
+ perf_event_groups_for_each(event, node,
+ &ctx->pinned_groups, group_node,
+ group_list, group_entry) {
+ ctx_pinned_sched_in(event, cpuctx, ctx);
+ }
+ }
+ }
/* Then walk through the lower prio flexible groups */
if (is_active & EVENT_FLEXIBLE) {
int can_add_hw = 1;
- perf_event_groups_for_each(event, node,
- &ctx->flexible_groups, group_node,
- group_list, group_entry)
- ctx_flexible_sched_in(event, cpuctx, ctx, &can_add_hw);
+ if (mux) {
+ perf_event_groups_for_each_cpu(event, sw,
+ &ctx->flexible_groups,
+ group_list, group_entry) {
+ ctx_flexible_sched_in(event, cpuctx,
+ ctx, &can_add_hw);
+ }
+ can_add_hw = 1;
+ perf_event_groups_for_each_cpu(event, cpu,
+ &ctx->flexible_groups,
+ group_list, group_entry) {
+ ctx_flexible_sched_in(event, cpuctx,
+ ctx, &can_add_hw);
+ }
+ } else {
+ perf_event_groups_for_each(event, node,
+ &ctx->flexible_groups, group_node,
+ group_list, group_entry) {
+ ctx_flexible_sched_in(event, cpuctx,
+ ctx, &can_add_hw);
+ }
+ }
}
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task)
+ struct task_struct *task, int mux)
{
struct perf_event_context *ctx = &cpuctx->ctx;
- ctx_sched_in(ctx, cpuctx, event_type, task);
+ ctx_sched_in(ctx, cpuctx, event_type, task, mux);
}
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
+ int mux = 0;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
@@ -3371,8 +3452,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
* events, no need to flip the cpuctx's events around.
*/
if (!RB_EMPTY_ROOT(&ctx->pinned_groups))
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE, mux);
+ perf_event_sched_in(cpuctx, ctx, task, mux);
perf_pmu_enable(ctx->pmu);
unlock:
@@ -3618,7 +3699,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
static int perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
- int rotate = 0;
+ int rotate = 0, mux = 1;
if (cpuctx->ctx.nr_events) {
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
@@ -3637,15 +3718,15 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE, mux);
if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE, mux);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
- perf_event_sched_in(cpuctx, ctx, current);
+ perf_event_sched_in(cpuctx, ctx, current, mux);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -3696,7 +3777,7 @@ static void perf_event_enable_on_exec(int ctxn)
struct perf_cpu_context *cpuctx;
struct perf_event *event;
unsigned long flags;
- int enabled = 0;
+ int enabled = 0, mux = 0;
local_irq_save(flags);
ctx = current->perf_event_ctxp[ctxn];
@@ -3705,7 +3786,7 @@ static void perf_event_enable_on_exec(int ctxn)
cpuctx = __get_cpu_context(ctx);
perf_ctx_lock(cpuctx, ctx);
- ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME, mux);
list_for_each_entry(event, &ctx->event_list, event_entry) {
enabled |= event_enable_on_exec(event, ctx);
event_type |= get_event_type(event);
@@ -3718,7 +3799,7 @@ static void perf_event_enable_on_exec(int ctxn)
clone_ctx = unclone_ctx(ctx);
ctx_resched(cpuctx, ctx, event_type);
} else {
- ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current, mux);
}
perf_ctx_unlock(cpuctx, ctx);
next prev parent reply other threads:[~2017-08-18 5:22 UTC|newest]
Thread overview: 76+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-08-02 8:11 [PATCH v6 0/3] perf/core: addressing 4x slowdown during per-process profiling of STREAM benchmark on Intel Xeon Phi Alexey Budankov
2017-08-02 8:13 ` [PATCH v6 1/3] perf/core: use rb trees for pinned/flexible groups Alexey Budankov
2017-08-03 13:00 ` Peter Zijlstra
2017-08-03 20:30 ` Alexey Budankov
2017-08-04 14:36 ` Peter Zijlstra
2017-08-07 7:17 ` Alexey Budankov
2017-08-07 8:39 ` Peter Zijlstra
2017-08-07 9:13 ` Peter Zijlstra
2017-08-07 15:32 ` Alexey Budankov
2017-08-07 15:55 ` Peter Zijlstra
2017-08-07 16:27 ` Alexey Budankov
2017-08-07 16:57 ` Peter Zijlstra
2017-08-07 17:39 ` Andi Kleen
2017-08-07 18:12 ` Peter Zijlstra
2017-08-07 18:13 ` Alexey Budankov
2017-08-15 17:28 ` Alexey Budankov
2017-08-23 13:39 ` Alexander Shishkin
2017-08-23 14:18 ` Alexey Budankov
2017-08-29 13:51 ` Alexander Shishkin
2017-08-30 8:30 ` Alexey Budankov
2017-08-30 10:18 ` Alexander Shishkin
2017-08-30 10:30 ` Alexey Budankov
2017-08-30 11:13 ` Alexander Shishkin
2017-08-30 11:16 ` Alexey Budankov
2017-08-31 10:12 ` Alexey Budankov
2017-08-31 10:12 ` Alexey Budankov
2017-08-04 14:53 ` Peter Zijlstra
2017-08-07 15:22 ` Alexey Budankov
2017-08-02 8:15 ` [PATCH v6 2/3]: perf/core: use context tstamp_data for skipped events on mux interrupt Alexey Budankov
2017-08-03 13:04 ` Peter Zijlstra
2017-08-03 14:00 ` Peter Zijlstra
2017-08-03 15:58 ` Alexey Budankov
2017-08-04 12:36 ` Peter Zijlstra
2017-08-03 15:00 ` Peter Zijlstra
2017-08-03 18:47 ` Alexey Budankov
2017-08-04 12:35 ` Peter Zijlstra
2017-08-04 12:51 ` Peter Zijlstra
2017-08-04 14:25 ` Alexey Budankov
2017-08-04 14:23 ` Alexey Budankov
2017-08-10 15:57 ` Alexey Budankov
2017-08-22 20:47 ` Peter Zijlstra
2017-08-23 8:54 ` Alexey Budankov
2017-08-31 17:18 ` [RFC][PATCH] perf: Rewrite enabled/running timekeeping Peter Zijlstra
2017-08-31 19:51 ` Stephane Eranian
2017-09-05 7:51 ` Stephane Eranian
2017-09-05 9:44 ` Peter Zijlstra
2017-09-01 10:45 ` Alexey Budankov
2017-09-01 12:31 ` Peter Zijlstra
2017-09-01 11:17 ` Alexey Budankov
2017-09-01 12:42 ` Peter Zijlstra
2017-09-01 21:03 ` Vince Weaver
2017-09-04 10:46 ` Alexey Budankov
2017-09-04 12:08 ` Peter Zijlstra
2017-09-04 14:56 ` Alexey Budankov
2017-09-04 15:41 ` Peter Zijlstra
2017-09-04 15:58 ` Peter Zijlstra
2017-09-05 10:17 ` Alexey Budankov
2017-09-05 11:19 ` Peter Zijlstra
2017-09-11 6:55 ` Alexey Budankov
2017-09-05 12:06 ` Alexey Budankov
2017-09-05 12:59 ` Peter Zijlstra
2017-09-05 16:03 ` Peter Zijlstra
2017-09-06 13:48 ` Alexey Budankov
2017-09-08 8:47 ` Alexey Budankov
2018-03-12 17:43 ` [tip:perf/core] perf/cor: Use RB trees for pinned/flexible groups tip-bot for Alexey Budankov
2017-08-02 8:16 ` [PATCH v6 3/3]: perf/core: add mux switch to skip to the current CPU's events list on mux interrupt Alexey Budankov
2017-08-18 5:17 ` [PATCH v7 0/2] perf/core: addressing 4x slowdown during per-process profiling of STREAM benchmark on Intel Xeon Phi Alexey Budankov
2017-08-18 5:21 ` [PATCH v7 1/2] perf/core: use rb trees for pinned/flexible groups Alexey Budankov
2017-08-23 11:17 ` Alexander Shishkin
2017-08-23 17:23 ` Alexey Budankov
2017-08-18 5:22 ` Alexey Budankov [this message]
2017-08-23 11:54 ` [PATCH v7 2/2] perf/core: add mux switch to skip to the current CPU's events list on mux interrupt Alexander Shishkin
2017-08-23 18:12 ` Alexey Budankov
2017-08-22 20:21 ` [PATCH v7 0/2] perf/core: addressing 4x slowdown during per-process profiling of STREAM benchmark on Intel Xeon Phi Peter Zijlstra
2017-08-23 8:54 ` Alexey Budankov
2017-08-31 10:12 ` Alexey Budankov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=edf9ed7e-a4d0-dbce-53cc-883f9ab40957@linux.intel.com \
--to=alexey.budankov@linux.intel.com \
--cc=Dmitry.Prohorov@intel.com \
--cc=acme@kernel.org \
--cc=ak@linux.intel.com \
--cc=alexander.shishkin@linux.intel.com \
--cc=davidcc@google.com \
--cc=eranian@google.com \
--cc=kan.liang@intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=mingo@redhat.com \
--cc=peterz@infradead.org \
--cc=valery.cherepennikov@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox