From: Peter Zijlstra <peterz@infradead.org>
To: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org, peterz@infradead.org,
x86@kernel.org, Linus Torvalds <torvalds@linux-foundation.org>,
Tim Chen <tim.c.chen@linux.intel.com>,
Josh Poimboeuf <jpoimboe@kernel.org>,
Andrew Cooper <Andrew.Cooper3@citrix.com>,
Pawan Gupta <pawan.kumar.gupta@linux.intel.com>,
Johannes Wikner <kwikner@ethz.ch>,
Alyssa Milburn <alyssa.milburn@linux.intel.com>,
Jann Horn <jannh@google.com>, "H.J. Lu" <hjl.tools@gmail.com>,
Joao Moreira <joao.moreira@intel.com>,
Joseph Nuzman <joseph.nuzman@intel.com>,
Steven Rostedt <rostedt@goodmis.org>,
Juergen Gross <jgross@suse.com>,
Masami Hiramatsu <mhiramat@kernel.org>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
K Prateek Nayak <kprateek.nayak@amd.com>,
Eric Dumazet <edumazet@google.com>
Subject: [PATCH v2 58/59] x86/ftrace: Make it call depth tracking aware
Date: Fri, 02 Sep 2022 15:07:23 +0200 [thread overview]
Message-ID: <20220902130952.375380675@infradead.org> (raw)
In-Reply-To: <20220902130625.217071627@infradead.org>
From: Peter Zijlstra <peterz@infradead.org>
Since ftrace has trampolines, don't use thunks for the __fentry__ site
but instead require that every function called from there includes
accounting. This very much includes all the direct-call functions.
Additionally, ftrace uses ROP tricks in two places:
- return_to_handler(), and
- ftrace_regs_caller() when pt_regs->orig_ax is set by a direct-call.
return_to_handler() already uses a retpoline to replace an
indirect-jump to defeat IBT, since this is a jump-type retpoline, make
sure there is no accounting done and ALTERNATIVE the RET into a ret.
ftrace_regs_caller() does much the same and gets the same treatment.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
arch/x86/include/asm/nospec-branch.h | 9 +++++++++
arch/x86/kernel/callthunks.c | 2 +-
arch/x86/kernel/ftrace.c | 16 ++++++++++++----
arch/x86/kernel/ftrace_64.S | 22 ++++++++++++++++++++--
arch/x86/net/bpf_jit_comp.c | 6 ++++++
kernel/trace/trace_selftest.c | 5 ++++-
samples/ftrace/ftrace-direct-modify.c | 3 +++
samples/ftrace/ftrace-direct-multi-modify.c | 3 +++
samples/ftrace/ftrace-direct-multi.c | 2 ++
samples/ftrace/ftrace-direct-too.c | 2 ++
samples/ftrace/ftrace-direct.c | 2 ++
11 files changed, 64 insertions(+), 8 deletions(-)
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -331,6 +331,12 @@ static inline void x86_set_skl_return_th
{
x86_return_thunk = &__x86_return_skl;
}
+
+#define CALL_DEPTH_ACCOUNT \
+ ALTERNATIVE("", \
+ __stringify(INCREMENT_CALL_DEPTH), \
+ X86_FEATURE_CALL_DEPTH)
+
#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
@@ -339,6 +345,9 @@ DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else
static inline void x86_set_skl_return_thunk(void) {}
+
+#define CALL_DEPTH_ACCOUNT ""
+
#endif
#ifdef CONFIG_RETPOLINE
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -315,7 +315,7 @@ int x86_call_depth_emit_accounting(u8 **
return 0;
/* Is function call target a thunk? */
- if (is_callthunk(func))
+ if (func && is_callthunk(func))
return 0;
memcpy(*pprog, tmpl, tmpl_size);
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -69,6 +69,10 @@ static const char *ftrace_nop_replace(vo
static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
+ /*
+ * No need to translate into a callthunk. The trampoline does
+ * the depth accounting itself.
+ */
return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
@@ -317,7 +321,7 @@ create_trampoline(struct ftrace_ops *ops
unsigned long size;
unsigned long *ptr;
void *trampoline;
- void *ip;
+ void *ip, *dest;
/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
@@ -404,10 +408,14 @@ create_trampoline(struct ftrace_ops *ops
/* put in the call to the function */
mutex_lock(&text_mutex);
call_offset -= start_offset;
+ /*
+ * No need to translate into a callthunk. The trampoline does
+ * the depth accounting before the call already.
+ */
+ dest = ftrace_ops_get_func(ops);
memcpy(trampoline + call_offset,
- text_gen_insn(CALL_INSN_OPCODE,
- trampoline + call_offset,
- ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
+ text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
+ CALL_INSN_SIZE);
mutex_unlock(&text_mutex);
/* ALLOC_TRAMP flags lets us know we created it */
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -4,6 +4,7 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
@@ -132,6 +133,7 @@
#ifdef CONFIG_DYNAMIC_FTRACE
SYM_FUNC_START(__fentry__)
+ CALL_DEPTH_ACCOUNT
RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
@@ -140,6 +142,8 @@ SYM_FUNC_START(ftrace_caller)
/* save_mcount_regs fills in first two parameters */
save_mcount_regs
+ CALL_DEPTH_ACCOUNT
+
/* Stack - skipping return address of ftrace_caller */
leaq MCOUNT_REG_SIZE+8(%rsp), %rcx
movq %rcx, RSP(%rsp)
@@ -155,6 +159,9 @@ SYM_INNER_LABEL(ftrace_caller_op_ptr, SY
/* Only ops with REGS flag set should have CS register set */
movq $0, CS(%rsp)
+ /* Account for the function call below */
+ CALL_DEPTH_ACCOUNT
+
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
call ftrace_stub
@@ -189,6 +196,8 @@ SYM_FUNC_START(ftrace_regs_caller)
save_mcount_regs 8
/* save_mcount_regs fills in first two parameters */
+ CALL_DEPTH_ACCOUNT
+
SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
/* Load the ftrace_ops into the 3rd parameter */
@@ -219,6 +228,9 @@ SYM_INNER_LABEL(ftrace_regs_caller_op_pt
/* regs go into 4th parameter */
leaq (%rsp), %rcx
+ /* Account for the function call below */
+ CALL_DEPTH_ACCOUNT
+
SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
call ftrace_stub
@@ -282,7 +294,9 @@ SYM_INNER_LABEL(ftrace_regs_caller_end,
int3
.Ldo_rebalance:
add $8, %rsp
- RET
+ ALTERNATIVE __stringify(RET), \
+ __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
+ X86_FEATURE_CALL_DEPTH
SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
@@ -291,6 +305,8 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_
#else /* ! CONFIG_DYNAMIC_FTRACE */
SYM_FUNC_START(__fentry__)
+ CALL_DEPTH_ACCOUNT
+
cmpq $ftrace_stub, ftrace_trace_function
jnz trace
@@ -347,6 +363,8 @@ SYM_CODE_START(return_to_handler)
int3
.Ldo_rop:
mov %rdi, (%rsp)
- RET
+ ALTERNATIVE __stringify(RET), \
+ __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
+ X86_FEATURE_CALL_DEPTH
SYM_CODE_END(return_to_handler)
#endif
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
+#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
@@ -2095,6 +2096,11 @@ int arch_prepare_bpf_trampoline(struct b
prog = image;
EMIT_ENDBR();
+ /*
+ * This is the direct-call trampoline, as such it needs accounting
+ * for the __fentry__ call.
+ */
+ x86_call_depth_emit_accounting(&prog, NULL);
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -785,7 +785,10 @@ static struct fgraph_ops fgraph_ops __in
};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-noinline __noclone static void trace_direct_tramp(void) { }
+noinline __noclone static void trace_direct_tramp(void)
+{
+ asm(CALL_DEPTH_ACCOUNT);
+}
#endif
/*
--- a/samples/ftrace/ftrace-direct-modify.c
+++ b/samples/ftrace/ftrace-direct-modify.c
@@ -3,6 +3,7 @@
#include <linux/kthread.h>
#include <linux/ftrace.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-branch.h>
extern void my_direct_func1(void);
extern void my_direct_func2(void);
@@ -34,6 +35,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" call my_direct_func1\n"
" leave\n"
" .size my_tramp1, .-my_tramp1\n"
@@ -45,6 +47,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" call my_direct_func2\n"
" leave\n"
ASM_RET
--- a/samples/ftrace/ftrace-direct-multi-modify.c
+++ b/samples/ftrace/ftrace-direct-multi-modify.c
@@ -3,6 +3,7 @@
#include <linux/kthread.h>
#include <linux/ftrace.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-branch.h>
extern void my_direct_func1(unsigned long ip);
extern void my_direct_func2(unsigned long ip);
@@ -32,6 +33,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" movq 8(%rbp), %rdi\n"
" call my_direct_func1\n"
@@ -46,6 +48,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" movq 8(%rbp), %rdi\n"
" call my_direct_func2\n"
--- a/samples/ftrace/ftrace-direct-multi.c
+++ b/samples/ftrace/ftrace-direct-multi.c
@@ -5,6 +5,7 @@
#include <linux/ftrace.h>
#include <linux/sched/stat.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-branch.h>
extern void my_direct_func(unsigned long ip);
@@ -27,6 +28,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" movq 8(%rbp), %rdi\n"
" call my_direct_func\n"
--- a/samples/ftrace/ftrace-direct-too.c
+++ b/samples/ftrace/ftrace-direct-too.c
@@ -4,6 +4,7 @@
#include <linux/mm.h> /* for handle_mm_fault() */
#include <linux/ftrace.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-branch.h>
extern void my_direct_func(struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
@@ -29,6 +30,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" pushq %rsi\n"
" pushq %rdx\n"
--- a/samples/ftrace/ftrace-direct.c
+++ b/samples/ftrace/ftrace-direct.c
@@ -4,6 +4,7 @@
#include <linux/sched.h> /* for wake_up_process() */
#include <linux/ftrace.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-branch.h>
extern void my_direct_func(struct task_struct *p);
@@ -26,6 +27,7 @@ asm (
ASM_ENDBR
" pushq %rbp\n"
" movq %rsp, %rbp\n"
+ CALL_DEPTH_ACCOUNT
" pushq %rdi\n"
" call my_direct_func\n"
" popq %rdi\n"
next prev parent reply other threads:[~2022-09-02 14:30 UTC|newest]
Thread overview: 81+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-09-02 13:06 [PATCH v2 00/59] x86/retbleed: Call depth tracking mitigation Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 01/59] x86/paravirt: Ensure proper alignment Peter Zijlstra
2022-09-02 16:05 ` Juergen Gross
2022-09-02 13:06 ` [PATCH v2 02/59] x86/cpu: Remove segment load from switch_to_new_gdt() Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 03/59] x86/cpu: Get rid of redundant switch_to_new_gdt() invocations Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 04/59] x86/cpu: Re-enable stackprotector Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 05/59] x86/modules: Set VM_FLUSH_RESET_PERMS in module_alloc() Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 06/59] x86/vdso: Ensure all kernel code is seen by objtool Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 07/59] x86: Sanitize linker script Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 08/59] x86/build: Ensure proper function alignment Peter Zijlstra
2022-09-02 16:51 ` Linus Torvalds
2022-09-02 17:32 ` Peter Zijlstra
2022-09-02 18:08 ` Linus Torvalds
2022-09-05 10:04 ` Peter Zijlstra
2022-09-12 14:09 ` Linus Torvalds
2022-09-12 19:44 ` Peter Zijlstra
2022-09-13 8:08 ` Peter Zijlstra
2022-09-13 13:08 ` Linus Torvalds
2022-09-05 2:09 ` David Laight
2022-09-02 13:06 ` [PATCH v2 09/59] x86/asm: " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 10/59] x86/error_inject: Align function properly Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 11/59] x86/paravirt: Properly align PV functions Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 12/59] x86/entry: Align SYM_CODE_START() variants Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 13/59] crypto: x86/camellia: Remove redundant alignments Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 14/59] crypto: x86/cast5: " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 15/59] crypto: x86/crct10dif-pcl: " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 16/59] crypto: x86/serpent: " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 17/59] crypto: x86/sha1: Remove custom alignments Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 18/59] crypto: x86/sha256: " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 19/59] crypto: x86/sm[34]: Remove redundant alignments Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 20/59] crypto: twofish: " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 21/59] crypto: x86/poly1305: Remove custom function alignment Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 22/59] x86: Put hot per CPU variables into a struct Peter Zijlstra
2022-09-02 18:02 ` Jann Horn
2022-09-15 11:22 ` Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 23/59] x86/percpu: Move preempt_count next to current_task Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 24/59] x86/percpu: Move cpu_number " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 25/59] x86/percpu: Move current_top_of_stack " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 26/59] x86/percpu: Move irq_stack variables " Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 27/59] x86/softirq: Move softirq pending next to current task Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 28/59] objtool: Allow !PC relative relocations Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 29/59] objtool: Track init section Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 30/59] objtool: Add .call_sites section Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 31/59] objtool: Add --hacks=skylake Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 32/59] objtool: Allow STT_NOTYPE -> STT_FUNC+0 tail-calls Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 33/59] objtool: Fix find_{symbol,func}_containing() Peter Zijlstra
2022-09-02 13:06 ` [PATCH v2 34/59] objtool: Allow symbol range comparisons for IBT/ENDBR Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 35/59] x86/entry: Make sync_regs() invocation a tail call Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 36/59] ftrace: Add HAVE_DYNAMIC_FTRACE_NO_PATCHABLE Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 37/59] x86/putuser: Provide room for padding Peter Zijlstra
2022-09-02 16:43 ` Linus Torvalds
2022-09-02 17:03 ` Peter Zijlstra
2022-09-02 20:24 ` Peter Zijlstra
2022-09-02 21:46 ` Linus Torvalds
2022-09-03 17:26 ` Linus Torvalds
2022-09-05 7:16 ` Peter Zijlstra
2022-09-05 11:26 ` Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 38/59] x86/Kconfig: Add CONFIG_CALL_THUNKS Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 39/59] x86/Kconfig: Introduce function padding Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 40/59] x86/retbleed: Add X86_FEATURE_CALL_DEPTH Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 41/59] x86/alternatives: Provide text_poke_copy_locked() Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 42/59] x86/entry: Make some entry symbols global Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 43/59] x86/paravirt: Make struct paravirt_call_site unconditionally available Peter Zijlstra
2022-09-02 16:09 ` Juergen Gross
2022-09-02 13:07 ` [PATCH v2 44/59] x86/callthunks: Add call patching for call depth tracking Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 45/59] x86/modules: Add call patching Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 46/59] x86/returnthunk: Allow different return thunks Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 47/59] x86/asm: Provide ALTERNATIVE_3 Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 48/59] x86/retbleed: Add SKL return thunk Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 49/59] x86/retpoline: Add SKL retthunk retpolines Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 50/59] x86/retbleed: Add SKL call thunk Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 51/59] x86/calldepth: Add ret/call counting for debug Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 52/59] static_call: Add call depth tracking support Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 53/59] kallsyms: Take callthunks into account Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 54/59] x86/orc: Make it callthunk aware Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 55/59] x86/bpf: Emit call depth accounting if required Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 56/59] x86/ftrace: Remove ftrace_epilogue() Peter Zijlstra
2022-09-02 13:07 ` [PATCH v2 57/59] x86/ftrace: Rebalance RSB Peter Zijlstra
2022-09-02 13:07 ` Peter Zijlstra [this message]
2022-09-02 13:07 ` [PATCH v2 59/59] x86/retbleed: Add call depth tracking mitigation Peter Zijlstra
2022-09-16 9:35 ` [PATCH v2 00/59] x86/retbleed: Call " Mel Gorman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220902130952.375380675@infradead.org \
--to=peterz@infradead.org \
--cc=Andrew.Cooper3@citrix.com \
--cc=alyssa.milburn@linux.intel.com \
--cc=ast@kernel.org \
--cc=daniel@iogearbox.net \
--cc=edumazet@google.com \
--cc=hjl.tools@gmail.com \
--cc=jannh@google.com \
--cc=jgross@suse.com \
--cc=joao.moreira@intel.com \
--cc=joseph.nuzman@intel.com \
--cc=jpoimboe@kernel.org \
--cc=kprateek.nayak@amd.com \
--cc=kwikner@ethz.ch \
--cc=linux-kernel@vger.kernel.org \
--cc=mhiramat@kernel.org \
--cc=pawan.kumar.gupta@linux.intel.com \
--cc=rostedt@goodmis.org \
--cc=tglx@linutronix.de \
--cc=tim.c.chen@linux.intel.com \
--cc=torvalds@linux-foundation.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox