public inbox for linux-hwmon@vger.kernel.org
 help / color / mirror / Atom feed
From: "Jürgen Groß" <jgross@suse.com>
To: "Xin Li (Intel)" <xin@zytor.com>,
	linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	linux-perf-users@vger.kernel.org, linux-hyperv@vger.kernel.org,
	virtualization@lists.linux.dev, linux-pm@vger.kernel.org,
	linux-edac@vger.kernel.org, xen-devel@lists.xenproject.org,
	linux-acpi@vger.kernel.org, linux-hwmon@vger.kernel.org,
	netdev@vger.kernel.org, platform-driver-x86@vger.kernel.org
Cc: tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
	dave.hansen@linux.intel.com, x86@kernel.org, hpa@zytor.com,
	acme@kernel.org, andrew.cooper3@citrix.com, peterz@infradead.org,
	namhyung@kernel.org, mark.rutland@arm.com,
	alexander.shishkin@linux.intel.com, jolsa@kernel.org,
	irogers@google.com, adrian.hunter@intel.com,
	kan.liang@linux.intel.com, wei.liu@kernel.org,
	ajay.kaher@broadcom.com, bcm-kernel-feedback-list@broadcom.com,
	tony.luck@intel.com, pbonzini@redhat.com, vkuznets@redhat.com,
	seanjc@google.com, luto@kernel.org, boris.ostrovsky@oracle.com,
	kys@microsoft.com, haiyangz@microsoft.com, decui@microsoft.com
Subject: Re: [RFC PATCH v2 22/34] x86/msr: Utilize the alternatives mechanism to read MSR
Date: Tue, 22 Apr 2025 13:12:29 +0200	[thread overview]
Message-ID: <080351cb-6c3d-4540-953d-6205f1ff0745@suse.com> (raw)
In-Reply-To: <20250422082216.1954310-23-xin@zytor.com>


[-- Attachment #1.1.1: Type: text/plain, Size: 10126 bytes --]

On 22.04.25 10:22, Xin Li (Intel) wrote:
> To eliminate the indirect call overhead introduced by the pv_ops API,
> utilize the alternatives mechanism to read MSR:
> 
>      1) When built with !CONFIG_XEN_PV, X86_FEATURE_XENPV becomes a
>         disabled feature, preventing the Xen code from being built
>         and ensuring the native code is executed unconditionally.
> 
>      2) When built with CONFIG_XEN_PV:
> 
>         2.1) If not running on the Xen hypervisor (!X86_FEATURE_XENPV),
>              the kernel runtime binary is patched to unconditionally
>              jump to the native MSR read code.
> 
>         2.2) If running on the Xen hypervisor (X86_FEATURE_XENPV), the
>              kernel runtime binary is patched to unconditionally jump
>              to the Xen MSR read code.
> 
> The alternatives mechanism is also used to choose the new immediate
> form MSR read instruction when it's available.
> 
> Consequently, remove the pv_ops MSR read APIs and the Xen callbacks.
> 
> Suggested-by: H. Peter Anvin (Intel) <hpa@zytor.com>
> Signed-off-by: Xin Li (Intel) <xin@zytor.com>
> ---
>   arch/x86/include/asm/msr.h            | 277 +++++++++++++++++++-------
>   arch/x86/include/asm/paravirt.h       |  40 ----
>   arch/x86/include/asm/paravirt_types.h |   9 -
>   arch/x86/kernel/paravirt.c            |   2 -
>   arch/x86/xen/enlighten_pv.c           |  48 ++---
>   arch/x86/xen/xen-asm.S                |  49 +++++
>   arch/x86/xen/xen-ops.h                |   7 +
>   7 files changed, 279 insertions(+), 153 deletions(-)
> 
> diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
> index bd3bdb3c3d23..5271cb002b23 100644
> --- a/arch/x86/include/asm/msr.h
> +++ b/arch/x86/include/asm/msr.h
> @@ -75,6 +75,7 @@ static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {}
>   #endif
>   
>   #ifdef CONFIG_XEN_PV
> +extern void asm_xen_read_msr(void);
>   extern void asm_xen_write_msr(void);
>   extern u64 xen_read_pmc(int counter);
>   #endif
> @@ -88,6 +89,8 @@ extern u64 xen_read_pmc(int counter);
>   
>   /* The GNU Assembler (Gas) with Binutils 2.41 adds the .insn directive support */
>   #if defined(CONFIG_AS_IS_GNU) && CONFIG_AS_VERSION >= 24100
> +#define ASM_RDMSR_IMM			\
> +	" .insn VEX.128.F2.M7.W0 0xf6 /0, %[msr]%{:u32}, %[val]\n\t"
>   #define ASM_WRMSRNS_IMM			\
>   	" .insn VEX.128.F3.M7.W0 0xf6 /0, %[val], %[msr]%{:u32}\n\t"
>   #else
> @@ -97,10 +100,17 @@ extern u64 xen_read_pmc(int counter);
>    * The register operand is encoded as %rax because all uses of the immediate
>    * form MSR access instructions reference %rax as the register operand.
>    */
> +#define ASM_RDMSR_IMM			\
> +	" .byte 0xc4,0xe7,0x7b,0xf6,0xc0; .long %c[msr]"
>   #define ASM_WRMSRNS_IMM			\
>   	" .byte 0xc4,0xe7,0x7a,0xf6,0xc0; .long %c[msr]"
>   #endif
>   
> +#define RDMSR_AND_SAVE_RESULT		\
> +	"rdmsr\n\t"			\
> +	"shl $0x20, %%rdx\n\t"		\
> +	"or %%rdx, %%rax\n\t"
> +
>   #define PREPARE_RDX_FOR_WRMSR		\
>   	"mov %%rax, %%rdx\n\t"		\
>   	"shr $0x20, %%rdx\n\t"
> @@ -127,35 +137,135 @@ static __always_inline bool is_msr_imm_insn(void *ip)
>   #endif
>   }
>   
> -static __always_inline u64 __rdmsr(u32 msr)
> +/*
> + * There are two sets of APIs for MSR accesses: native APIs and generic APIs.
> + * Native MSR APIs execute MSR instructions directly, regardless of whether the
> + * CPU is paravirtualized or native.  Generic MSR APIs determine the appropriate
> + * MSR access method at runtime, allowing them to be used generically on both
> + * paravirtualized and native CPUs.
> + *
> + * When the compiler can determine the MSR number at compile time, the APIs
> + * with the suffix _constant() are used to enable the immediate form MSR
> + * instructions when available.  The APIs with the suffix _variable() are
> + * used when the MSR number is not known until run time.
> + *
> + * Below is a diagram illustrating the derivation of the MSR read APIs:
> + *
> + *      __native_rdmsrq_variable()    __native_rdmsrq_constant()
> + *                         \           /
> + *                          \         /
> + *                         __native_rdmsrq()   -----------------------
> + *                            /     \                                |
> + *                           /       \                               |
> + *               native_rdmsrq()    native_read_msr_safe()           |
> + *                   /    \                                          |
> + *                  /      \                                         |
> + *      native_rdmsr()    native_read_msr()                          |
> + *                                                                   |
> + *                                                                   |
> + *                                                                   |
> + *                    __xenpv_rdmsrq()                               |
> + *                         |                                         |
> + *                         |                                         |
> + *                      __rdmsrq()   <--------------------------------
> + *                       /    \
> + *                      /      \
> + *                 rdmsrq()   rdmsrq_safe()
> + *                    /          \
> + *                   /            \
> + *                rdmsr()        rdmsr_safe()
> + */
> +
> +static __always_inline bool __native_rdmsrq_variable(u32 msr, u64 *val, int type)
>   {
> -	DECLARE_ARGS(val, low, high);
> +#ifdef CONFIG_X86_64
> +	BUILD_BUG_ON(__builtin_constant_p(msr));
>   
> -	asm volatile("1: rdmsr\n"
> -		     "2:\n"
> -		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
> -		     : EAX_EDX_RET(val, low, high) : "c" (msr));
> +	asm_inline volatile goto(
> +		"1:\n"
> +		RDMSR_AND_SAVE_RESULT
> +		_ASM_EXTABLE_TYPE(1b, %l[badmsr], %c[type])	/* For RDMSR */
>   
> -	return EAX_EDX_VAL(val, low, high);
> +		: [val] "=a" (*val)
> +		: "c" (msr), [type] "i" (type)
> +		: "rdx"
> +		: badmsr);
> +#else
> +	asm_inline volatile goto(
> +		"1: rdmsr\n\t"
> +		_ASM_EXTABLE_TYPE(1b, %l[badmsr], %c[type])	/* For RDMSR */
> +
> +		: "=A" (*val)
> +		: "c" (msr), [type] "i" (type)
> +		:
> +		: badmsr);
> +#endif
> +
> +	return false;
> +
> +badmsr:
> +	*val = 0;
> +
> +	return true;
>   }
>   
> -#define native_rdmsr(msr, val1, val2)			\
> -do {							\
> -	u64 __val = __rdmsr((msr));			\
> -	(void)((val1) = (u32)__val);			\
> -	(void)((val2) = (u32)(__val >> 32));		\
> -} while (0)
> +#ifdef CONFIG_X86_64
> +static __always_inline bool __native_rdmsrq_constant(u32 msr, u64 *val, int type)
> +{
> +	BUILD_BUG_ON(!__builtin_constant_p(msr));
> +
> +	asm_inline volatile goto(
> +		"1:\n"
> +		ALTERNATIVE("mov %[msr], %%ecx\n\t"
> +			    "2:\n"
> +			    RDMSR_AND_SAVE_RESULT,
> +			    ASM_RDMSR_IMM,
> +			    X86_FEATURE_MSR_IMM)
> +		_ASM_EXTABLE_TYPE(1b, %l[badmsr], %c[type])	/* For RDMSR immediate */
> +		_ASM_EXTABLE_TYPE(2b, %l[badmsr], %c[type])	/* For RDMSR */
> +
> +		: [val] "=a" (*val)
> +		: [msr] "i" (msr), [type] "i" (type)
> +		: "ecx", "rdx"
> +		: badmsr);
> +
> +	return false;
> +
> +badmsr:
> +	*val = 0;
> +
> +	return true;
> +}
> +#endif
> +
> +static __always_inline bool __native_rdmsrq(u32 msr, u64 *val, int type)
> +{
> +#ifdef CONFIG_X86_64
> +	if (__builtin_constant_p(msr))
> +		return __native_rdmsrq_constant(msr, val, type);
> +#endif
> +
> +	return __native_rdmsrq_variable(msr, val, type);
> +}
>   
>   static __always_inline u64 native_rdmsrq(u32 msr)
>   {
> -	return __rdmsr(msr);
> +	u64 val = 0;
> +
> +	__native_rdmsrq(msr, &val, EX_TYPE_RDMSR);
> +	return val;
>   }
>   
> +#define native_rdmsr(msr, low, high)			\
> +do {							\
> +	u64 __val = native_rdmsrq(msr);			\
> +	(void)((low) = (u32)__val);			\
> +	(void)((high) = (u32)(__val >> 32));		\
> +} while (0)
> +
>   static inline u64 native_read_msr(u32 msr)
>   {
> -	u64 val;
> -
> -	val = __rdmsr(msr);
> +	u64 val = native_rdmsrq(msr);
>   
>   	if (tracepoint_enabled(read_msr))
>   		do_trace_read_msr(msr, val, 0);
> @@ -163,36 +273,91 @@ static inline u64 native_read_msr(u32 msr)
>   	return val;
>   }
>   
> -static inline int native_read_msr_safe(u32 msr, u64 *p)
> +static inline int native_read_msr_safe(u32 msr, u64 *val)
>   {
>   	int err;
> -	DECLARE_ARGS(val, low, high);
>   
> -	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
> -		     "2:\n\t"
> -		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
> -		     : [err] "=r" (err), EAX_EDX_RET(val, low, high)
> -		     : "c" (msr));
> -	if (tracepoint_enabled(read_msr))
> -		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);
> +	err = __native_rdmsrq(msr, val, EX_TYPE_RDMSR_SAFE) ? -EIO : 0;
>   
> -	*p = EAX_EDX_VAL(val, low, high);
> +	if (tracepoint_enabled(read_msr))
> +		do_trace_read_msr(msr, *val, err);
>   
>   	return err;
>   }
>   
> +#ifdef CONFIG_XEN_PV
> +/* No plan to support immediate form MSR instructions in Xen */
> +static __always_inline bool __xenpv_rdmsrq(u32 msr, u64 *val, int type)
> +{
> +	asm_inline volatile goto(
> +		"1: call asm_xen_read_msr\n\t"
> +		_ASM_EXTABLE_TYPE(1b, %l[badmsr], %c[type])	/* For CALL */
> +
> +		: [val] "=a" (*val), ASM_CALL_CONSTRAINT
> +		: "c" (msr), [type] "i" (type)
> +		: "rdx"
> +		: badmsr);
> +
> +	return false;
> +
> +badmsr:
> +	*val = 0;
> +
> +	return true;
> +}
> +#endif
> +
> +static __always_inline bool __rdmsrq(u32 msr, u64 *val, int type)
> +{
> +	bool ret;
> +
> +#ifdef CONFIG_XEN_PV
> +	if (cpu_feature_enabled(X86_FEATURE_XENPV))
> +		return __xenpv_rdmsrq(msr, val, type);

I don't think this will work for the Xen PV case.

X86_FEATURE_XENPV is set only after the first MSR has been read.

This can be fixed by setting the feature earlier, but it shows that the
paravirt feature has its benefits in such cases.


Juergen

[-- Attachment #1.1.2: OpenPGP public key --]
[-- Type: application/pgp-keys, Size: 3743 bytes --]

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 495 bytes --]

  parent reply	other threads:[~2025-04-22 11:12 UTC|newest]

Thread overview: 94+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-04-22  8:21 [RFC PATCH v2 00/34] MSR refactor with new MSR instructions support Xin Li (Intel)
2025-04-22  8:21 ` [RFC PATCH v2 01/34] x86/msr: Move rdtsc{,_ordered}() to <asm/tsc.h> Xin Li (Intel)
2025-04-23 14:13   ` Dave Hansen
2025-04-23 17:12     ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 02/34] x86/msr: Remove rdpmc() Xin Li (Intel)
2025-04-23 14:23   ` Dave Hansen
2025-04-22  8:21 ` [RFC PATCH v2 03/34] x86/msr: Rename rdpmcl() to rdpmcq() Xin Li (Intel)
2025-04-23 14:24   ` Dave Hansen
2025-04-23 14:28   ` Sean Christopherson
2025-04-23 15:06     ` Dave Hansen
2025-04-23 17:23       ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 04/34] x86/msr: Convert rdpmcq() into a function Xin Li (Intel)
2025-04-23 14:25   ` Dave Hansen
2025-04-22  8:21 ` [RFC PATCH v2 05/34] x86/msr: Return u64 consistently in Xen PMC read functions Xin Li (Intel)
2025-04-22  8:40   ` Jürgen Groß
2025-04-22  8:21 ` [RFC PATCH v2 06/34] x86/msr: Use the alternatives mechanism to read PMC Xin Li (Intel)
2025-04-22  8:38   ` Jürgen Groß
2025-04-22  9:12     ` Xin Li
2025-04-22  9:28       ` Juergen Gross
2025-04-23  7:40         ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 07/34] x86/msr: Convert __wrmsr() uses to native_wrmsr{,q}() uses Xin Li (Intel)
2025-04-22  8:21 ` [RFC PATCH v2 08/34] x86/msr: Convert a native_wrmsr() use to native_wrmsrq() Xin Li (Intel)
2025-04-23 15:51   ` Dave Hansen
2025-04-23 17:27     ` Xin Li
2025-04-23 23:23     ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 09/34] x86/msr: Add the native_rdmsrq() helper Xin Li (Intel)
2025-04-22  8:21 ` [RFC PATCH v2 10/34] x86/msr: Convert __rdmsr() uses to native_rdmsrq() uses Xin Li (Intel)
2025-04-22 15:09   ` Sean Christopherson
2025-04-23  9:27     ` Xin Li
2025-04-23 13:37       ` Sean Christopherson
2025-04-23 14:02       ` Dave Hansen
2025-04-22  8:21 ` [RFC PATCH v2 11/34] x86/msr: Remove calling native_{read,write}_msr{,_safe}() in pmu_msr_{read,write}() Xin Li (Intel)
2025-04-24  6:25   ` Mi, Dapeng
2025-04-24  7:16     ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 12/34] x86/msr: Remove pmu_msr_{read,write}() Xin Li (Intel)
2025-04-24  6:33   ` Mi, Dapeng
2025-04-24  7:21     ` Xin Li
2025-04-24  7:43       ` Mi, Dapeng
2025-04-24  7:50         ` Xin Li
2025-04-24 10:05   ` Jürgen Groß
2025-04-24 17:49     ` Xin Li
2025-04-24 21:14       ` H. Peter Anvin
2025-04-24 22:24         ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 13/34] x86/xen/msr: Remove the error pointer argument from set_reg() Xin Li (Intel)
2025-04-24 10:11   ` Jürgen Groß
2025-04-24 17:50     ` Xin Li
2025-04-22  8:21 ` [RFC PATCH v2 14/34] x86/msr: refactor pv_cpu_ops.write_msr{_safe}() Xin Li (Intel)
2025-04-24 10:16   ` Jürgen Groß
2025-04-22  8:21 ` [RFC PATCH v2 15/34] x86/msr: Replace wrmsr(msr, low, 0) with wrmsrq(msr, low) Xin Li (Intel)
2025-04-22  8:21 ` [RFC PATCH v2 16/34] x86/msr: Change function type of native_read_msr_safe() Xin Li (Intel)
2025-04-22  8:21 ` [RFC PATCH v2 17/34] x86/cpufeatures: Add a CPU feature bit for MSR immediate form instructions Xin Li (Intel)
2025-04-22  8:21 ` [RFC PATCH v2 18/34] x86/opcode: Add immediate form MSR instructions Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 19/34] x86/extable: Add support for " Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 20/34] x86/extable: Implement EX_TYPE_FUNC_REWIND Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 21/34] x86/msr: Utilize the alternatives mechanism to write MSR Xin Li (Intel)
2025-04-22  9:57   ` Jürgen Groß
2025-04-23  8:51     ` Xin Li
2025-04-23 16:05       ` Jürgen Groß
2025-04-24  8:06         ` Xin Li
2025-04-24  8:14           ` Jürgen Groß
2025-04-25  1:15             ` H. Peter Anvin
2025-04-25  3:44               ` H. Peter Anvin
2025-04-25  7:01                 ` Jürgen Groß
2025-04-25 15:28                   ` H. Peter Anvin
2025-04-25  6:51               ` Jürgen Groß
2025-04-25 12:33         ` Peter Zijlstra
2025-04-25 12:51           ` Jürgen Groß
2025-04-25 20:12             ` H. Peter Anvin
2025-04-25 15:29           ` H. Peter Anvin
2025-04-25  7:11     ` Peter Zijlstra
2025-04-22  8:22 ` [RFC PATCH v2 22/34] x86/msr: Utilize the alternatives mechanism to read MSR Xin Li (Intel)
2025-04-22  8:59   ` Jürgen Groß
2025-04-22  9:20     ` Xin Li
2025-04-22  9:57       ` Jürgen Groß
2025-04-22 11:12   ` Jürgen Groß [this message]
2025-04-23  9:03     ` Xin Li
2025-04-23 16:11       ` Jürgen Groß
2025-04-22  8:22 ` [RFC PATCH v2 23/34] x86/extable: Remove new dead code in ex_handler_msr() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 24/34] x86/mce: Use native MSR API __native_{wr,rd}msrq() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 25/34] x86/msr: Rename native_wrmsrq() to native_wrmsrq_no_trace() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 26/34] x86/msr: Rename native_wrmsr() to native_wrmsr_no_trace() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 27/34] x86/msr: Rename native_write_msr() to native_wrmsrq() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 28/34] x86/msr: Rename native_write_msr_safe() to native_wrmsrq_safe() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 29/34] x86/msr: Rename native_rdmsrq() to native_rdmsrq_no_trace() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 30/34] x86/msr: Rename native_rdmsr() to native_rdmsr_no_trace() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 31/34] x86/msr: Rename native_read_msr() to native_rdmsrq() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 32/34] x86/msr: Rename native_read_msr_safe() to native_rdmsrq_safe() Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 33/34] x86/msr: Move the ARGS macros after the MSR read/write APIs Xin Li (Intel)
2025-04-22  8:22 ` [RFC PATCH v2 34/34] x86/msr: Convert native_rdmsr_no_trace() uses to native_rdmsrq_no_trace() uses Xin Li (Intel)
2025-04-22 15:03 ` [RFC PATCH v2 00/34] MSR refactor with new MSR instructions support Sean Christopherson
2025-04-22 17:51   ` Xin Li
2025-04-22 18:05     ` Luck, Tony
2025-04-22 19:44       ` Ingo Molnar
2025-04-22 19:51         ` Sean Christopherson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=080351cb-6c3d-4540-953d-6205f1ff0745@suse.com \
    --to=jgross@suse.com \
    --cc=acme@kernel.org \
    --cc=adrian.hunter@intel.com \
    --cc=ajay.kaher@broadcom.com \
    --cc=alexander.shishkin@linux.intel.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=bcm-kernel-feedback-list@broadcom.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=bp@alien8.de \
    --cc=dave.hansen@linux.intel.com \
    --cc=decui@microsoft.com \
    --cc=haiyangz@microsoft.com \
    --cc=hpa@zytor.com \
    --cc=irogers@google.com \
    --cc=jolsa@kernel.org \
    --cc=kan.liang@linux.intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=kys@microsoft.com \
    --cc=linux-acpi@vger.kernel.org \
    --cc=linux-edac@vger.kernel.org \
    --cc=linux-hwmon@vger.kernel.org \
    --cc=linux-hyperv@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-perf-users@vger.kernel.org \
    --cc=linux-pm@vger.kernel.org \
    --cc=luto@kernel.org \
    --cc=mark.rutland@arm.com \
    --cc=mingo@redhat.com \
    --cc=namhyung@kernel.org \
    --cc=netdev@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=peterz@infradead.org \
    --cc=platform-driver-x86@vger.kernel.org \
    --cc=seanjc@google.com \
    --cc=tglx@linutronix.de \
    --cc=tony.luck@intel.com \
    --cc=virtualization@lists.linux.dev \
    --cc=vkuznets@redhat.com \
    --cc=wei.liu@kernel.org \
    --cc=x86@kernel.org \
    --cc=xen-devel@lists.xenproject.org \
    --cc=xin@zytor.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox