From: Paul Durrant <Paul.Durrant@citrix.com>
To: 'Jan Beulich' <JBeulich@suse.com>,
xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>,
George Dunlap <George.Dunlap@citrix.com>
Subject: Re: [PATCH v4 07/20] x86: move and rename XSTATE_*
Date: Wed, 28 Feb 2018 13:06:19 +0000 [thread overview]
Message-ID: <9f910d8a32b94fb4972b9937cfda4d76@AMSPEX02CL03.citrite.net> (raw)
In-Reply-To: <5A96B64902000078001ACBC7@prv-mh.provo.novell.com>
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 28 February 2018 13:02
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; George Dunlap <George.Dunlap@citrix.com>
> Subject: [PATCH v4 07/20] x86: move and rename XSTATE_*
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> v4: New, split off from later patch.
>
> --- a/xen/arch/x86/cpuid.c
> +++ b/xen/arch/x86/cpuid.c
> @@ -122,42 +122,42 @@ static void recalculate_xstate(struct cp
>
> if ( p->basic.avx )
> {
> - xstates |= XSTATE_YMM;
> + xstates |= X86_XCR0_YMM;
> xstate_size = max(xstate_size,
> - xstate_offsets[_XSTATE_YMM] +
> - xstate_sizes[_XSTATE_YMM]);
> + xstate_offsets[X86_XCR0_YMM_POS] +
> + xstate_sizes[X86_XCR0_YMM_POS]);
> }
>
> if ( p->feat.mpx )
> {
> - xstates |= XSTATE_BNDREGS | XSTATE_BNDCSR;
> + xstates |= X86_XCR0_BNDREGS | X86_XCR0_BNDCSR;
> xstate_size = max(xstate_size,
> - xstate_offsets[_XSTATE_BNDCSR] +
> - xstate_sizes[_XSTATE_BNDCSR]);
> + xstate_offsets[X86_XCR0_BNDCSR_POS] +
> + xstate_sizes[X86_XCR0_BNDCSR_POS]);
> }
>
> if ( p->feat.avx512f )
> {
> - xstates |= XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM;
> + xstates |= X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM;
> xstate_size = max(xstate_size,
> - xstate_offsets[_XSTATE_HI_ZMM] +
> - xstate_sizes[_XSTATE_HI_ZMM]);
> + xstate_offsets[X86_XCR0_HI_ZMM_POS] +
> + xstate_sizes[X86_XCR0_HI_ZMM_POS]);
> }
>
> if ( p->feat.pku )
> {
> - xstates |= XSTATE_PKRU;
> + xstates |= X86_XCR0_PKRU;
> xstate_size = max(xstate_size,
> - xstate_offsets[_XSTATE_PKRU] +
> - xstate_sizes[_XSTATE_PKRU]);
> + xstate_offsets[X86_XCR0_PKRU_POS] +
> + xstate_sizes[X86_XCR0_PKRU_POS]);
> }
>
> if ( p->extd.lwp )
> {
> - xstates |= XSTATE_LWP;
> + xstates |= X86_XCR0_LWP;
> xstate_size = max(xstate_size,
> - xstate_offsets[_XSTATE_LWP] +
> - xstate_sizes[_XSTATE_LWP]);
> + xstate_offsets[X86_XCR0_LWP_POS] +
> + xstate_sizes[X86_XCR0_LWP_POS]);
> }
>
> p->xstate.max_size = xstate_size;
> @@ -1016,7 +1016,7 @@ void guest_cpuid(const struct vcpu *v, u
> break;
>
> case 0x8000001c:
> - if ( (v->arch.xcr0 & XSTATE_LWP) && cpu_has_svm )
> + if ( (v->arch.xcr0 & X86_XCR0_LWP) && cpu_has_svm )
> /* Turn on available bit and other features specified in lwp_cfg. */
> res->a = (res->d & v->arch.hvm_svm.guest_lwp_cfg) | 1;
> break;
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1882,8 +1882,8 @@ static int hvmemul_get_fpu(
> case X86EMUL_FPU_xmm:
> break;
> case X86EMUL_FPU_ymm:
> - if ( !(curr->arch.xcr0 & XSTATE_SSE) ||
> - !(curr->arch.xcr0 & XSTATE_YMM) )
> + if ( !(curr->arch.xcr0 & X86_XCR0_SSE) ||
> + !(curr->arch.xcr0 & X86_XCR0_YMM) )
> return X86EMUL_UNHANDLEABLE;
> break;
> default:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -318,7 +318,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
> * enabled in BNDCFGS.
> */
> if ( (val & IA32_BNDCFGS_ENABLE) &&
> - !(v->arch.xcr0_accum & (XSTATE_BNDREGS | XSTATE_BNDCSR)) )
> + !(v->arch.xcr0_accum & (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR)) )
> {
> uint64_t xcr0 = get_xcr0();
> int rc;
> @@ -327,7 +327,7 @@ bool hvm_set_guest_bndcfgs(struct vcpu *
> return false;
>
> rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
> - xcr0 | XSTATE_BNDREGS | XSTATE_BNDCSR);
> + xcr0 | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR);
>
> if ( rc )
> {
> @@ -2409,10 +2409,10 @@ int hvm_set_cr4(unsigned long value, boo
> * guest may enable the feature in CR4 without enabling it in XCR0. We
> * need to context switch / migrate PKRU nevertheless.
> */
> - if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & XSTATE_PKRU) )
> + if ( (value & X86_CR4_PKE) && !(v->arch.xcr0_accum & X86_XCR0_PKRU) )
> {
> int rc = handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
> - get_xcr0() | XSTATE_PKRU);
> + get_xcr0() | X86_XCR0_PKRU);
>
> if ( rc )
> {
> @@ -2421,7 +2421,7 @@ int hvm_set_cr4(unsigned long value, boo
> }
>
> if ( handle_xsetbv(XCR_XFEATURE_ENABLED_MASK,
> - get_xcr0() & ~XSTATE_PKRU) )
> + get_xcr0() & ~X86_XCR0_PKRU) )
> /* nothing, best effort only */;
> }
>
> @@ -3890,7 +3890,7 @@ void hvm_vcpu_reset_state(struct vcpu *v
> fpu_ctxt->mxcsr = MXCSR_DEFAULT;
> if ( v->arch.xsave_area )
> {
> - v->arch.xsave_area->xsave_hdr.xstate_bv = XSTATE_FP;
> + v->arch.xsave_area->xsave_hdr.xstate_bv = X86_XCR0_FP;
> v->arch.xsave_area->xsave_hdr.xcomp_bv = 0;
> }
>
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -2157,7 +2157,7 @@ static void adjust_bnd(struct x86_emulat
> * (in which case executing any suitable non-prefixed branch
> * instruction would do), or use XRSTOR.
> */
> - xstate_set_init(XSTATE_BNDREGS);
> + xstate_set_init(X86_XCR0_BNDREGS);
> }
> done:;
> }
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -304,7 +304,7 @@ void xsave(struct vcpu *v, uint64_t mask
> "=m" (*ptr), \
> "a" (lmask), "d" (hmask), "D" (ptr))
>
> - if ( fip_width == 8 || !(mask & XSTATE_FP) )
> + if ( fip_width == 8 || !(mask & X86_XCR0_FP) )
> {
> XSAVE("0x48,");
> }
> @@ -357,7 +357,7 @@ void xsave(struct vcpu *v, uint64_t mask
> fip_width = 8;
> }
> #undef XSAVE
> - if ( mask & XSTATE_FP )
> + if ( mask & X86_XCR0_FP )
> ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET] = fip_width;
> }
>
> @@ -375,7 +375,7 @@ void xrstor(struct vcpu *v, uint64_t mas
> * sometimes new user value. Both should be ok. Use the FPU saved
> * data block as a safe address because it should be in L1.
> */
> - if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
> + if ( (mask & ptr->xsave_hdr.xstate_bv & X86_XCR0_FP) &&
> !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
> boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
> asm volatile ( "fnclex\n\t" /* clear exceptions */
> @@ -451,8 +451,8 @@ void xrstor(struct vcpu *v, uint64_t mas
> * Also try to eliminate fault reasons, even if this shouldn't be
> * needed here (other code should ensure the sanity of the data).
> */
> - if ( ((mask & XSTATE_SSE) ||
> - ((mask & XSTATE_YMM) &&
> + if ( ((mask & X86_XCR0_SSE) ||
> + ((mask & X86_XCR0_YMM) &&
> !(ptr->xsave_hdr.xcomp_bv & XSTATE_COMPACTION_ENABLED))) )
> ptr->fpu_sse.mxcsr &= mxcsr_mask;
> if ( v->arch.xcr0_accum & XSTATE_XSAVES_ONLY )
> @@ -595,7 +595,7 @@ void xstate_init(struct cpuinfo_x86 *c)
> cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
>
> BUG_ON((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE);
> - BUG_ON((eax & XSTATE_YMM) && !(eax & XSTATE_SSE));
> + BUG_ON((eax & X86_XCR0_YMM) && !(eax & X86_XCR0_SSE));
> feature_mask = (((u64)edx << 32) | eax) & XCNTXT_MASK;
>
> /*
> @@ -648,26 +648,26 @@ void xstate_init(struct cpuinfo_x86 *c)
> static bool valid_xcr0(u64 xcr0)
> {
> /* FP must be unconditionally set. */
> - if ( !(xcr0 & XSTATE_FP) )
> + if ( !(xcr0 & X86_XCR0_FP) )
> return false;
>
> /* YMM depends on SSE. */
> - if ( (xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE) )
> + if ( (xcr0 & X86_XCR0_YMM) && !(xcr0 & X86_XCR0_SSE) )
> return false;
>
> - if ( xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
> - if ( xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
> {
> /* OPMASK, ZMM, and HI_ZMM require YMM. */
> - if ( !(xcr0 & XSTATE_YMM) )
> + if ( !(xcr0 & X86_XCR0_YMM) )
> return false;
>
> /* OPMASK, ZMM, and HI_ZMM must be the same. */
> - if ( ~xcr0 & (XSTATE_OPMASK | XSTATE_ZMM | XSTATE_HI_ZMM) )
> - if ( ~xcr0 & (X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM) )
> return false;
> }
>
> /* BNDREGS and BNDCSR must be the same. */
> - return !(xcr0 & XSTATE_BNDREGS) == !(xcr0 & XSTATE_BNDCSR);
> + return !(xcr0 & X86_XCR0_BNDREGS) == !(xcr0 & X86_XCR0_BNDCSR);
> }
>
> int validate_xstate(u64 xcr0, u64 xcr0_accum, const struct xsave_hdr *hdr)
> @@ -703,7 +703,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
> return -EINVAL;
>
> /* XCR0.PKRU is disabled on PV mode. */
> - if ( is_pv_vcpu(curr) && (new_bv & XSTATE_PKRU) )
> + if ( is_pv_vcpu(curr) && (new_bv & X86_XCR0_PKRU) )
> return -EOPNOTSUPP;
>
> if ( !set_xcr0(new_bv) )
> @@ -714,7 +714,7 @@ int handle_xsetbv(u32 index, u64 new_bv)
> curr->arch.xcr0_accum |= new_bv;
>
> /* LWP sets nonlazy_xstate_used independently. */
> - if ( new_bv & (XSTATE_NONLAZY & ~XSTATE_LWP) )
> + if ( new_bv & (XSTATE_NONLAZY & ~X86_XCR0_LWP) )
> curr->arch.nonlazy_xstate_used = 1;
>
> mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
> @@ -755,7 +755,7 @@ uint64_t read_bndcfgu(void)
> {
> asm ( ".byte 0x0f,0xc7,0x27\n" /* xsavec */
> : "=m" (*xstate)
> - : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
> + : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
>
> bndcsr = (void *)(xstate + 1);
> }
> @@ -763,15 +763,15 @@ uint64_t read_bndcfgu(void)
> {
> asm ( ".byte 0x0f,0xae,0x27\n" /* xsave */
> : "=m" (*xstate)
> - : "a" (XSTATE_BNDCSR), "d" (0), "D" (xstate) );
> + : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
>
> - bndcsr = (void *)xstate + xstate_offsets[_XSTATE_BNDCSR];
> + bndcsr = (void *)xstate + xstate_offsets[X86_XCR0_BNDCSR_POS];
> }
>
> if ( cr0 & X86_CR0_TS )
> write_cr0(cr0);
>
> - return xstate->xsave_hdr.xstate_bv & XSTATE_BNDCSR ? bndcsr->bndcfgu : 0;
> + return xstate->xsave_hdr.xstate_bv & X86_XCR0_BNDCSR ? bndcsr->bndcfgu : 0;
> }
>
> void xstate_set_init(uint64_t mask)
> --- a/xen/include/asm-x86/x86-defns.h
> +++ b/xen/include/asm-x86/x86-defns.h
> @@ -66,4 +66,28 @@
> #define X86_CR4_SMAP 0x00200000 /* enable SMAP */
> #define X86_CR4_PKE 0x00400000 /* enable PKE */
>
> +/*
> + * XSTATE component flags in XCR0
> + */
> +#define X86_XCR0_FP_POS 0
> +#define X86_XCR0_FP (1ULL << X86_XCR0_FP_POS)
> +#define X86_XCR0_SSE_POS 1
> +#define X86_XCR0_SSE (1ULL << X86_XCR0_SSE_POS)
> +#define X86_XCR0_YMM_POS 2
> +#define X86_XCR0_YMM (1ULL << X86_XCR0_YMM_POS)
> +#define X86_XCR0_BNDREGS_POS 3
> +#define X86_XCR0_BNDREGS (1ULL << X86_XCR0_BNDREGS_POS)
> +#define X86_XCR0_BNDCSR_POS 4
> +#define X86_XCR0_BNDCSR (1ULL << X86_XCR0_BNDCSR_POS)
> +#define X86_XCR0_OPMASK_POS 5
> +#define X86_XCR0_OPMASK (1ULL << X86_XCR0_OPMASK_POS)
> +#define X86_XCR0_ZMM_POS 6
> +#define X86_XCR0_ZMM (1ULL << X86_XCR0_ZMM_POS)
> +#define X86_XCR0_HI_ZMM_POS 7
> +#define X86_XCR0_HI_ZMM (1ULL << X86_XCR0_HI_ZMM_POS)
> +#define X86_XCR0_PKRU_POS 9
> +#define X86_XCR0_PKRU (1ULL << X86_XCR0_PKRU_POS)
> +#define X86_XCR0_LWP_POS 62
> +#define X86_XCR0_LWP (1ULL << X86_XCR0_LWP_POS)
> +
> #endif /* __XEN_X86_DEFNS_H__ */
> --- a/xen/include/asm-x86/xstate.h
> +++ b/xen/include/asm-x86/xstate.h
> @@ -10,6 +10,7 @@
>
> #include <xen/sched.h>
> #include <asm/cpufeature.h>
> +#include <asm/x86-defns.h>
>
> #define FCW_DEFAULT 0x037f
> #define FCW_RESET 0x0040
> @@ -28,34 +29,14 @@ extern uint32_t mxcsr_mask;
> #define XSAVE_HDR_OFFSET FXSAVE_SIZE
> #define XSTATE_AREA_MIN_SIZE (FXSAVE_SIZE + XSAVE_HDR_SIZE)
>
> -#define _XSTATE_FP 0
> -#define XSTATE_FP (1ULL << _XSTATE_FP)
> -#define _XSTATE_SSE 1
> -#define XSTATE_SSE (1ULL << _XSTATE_SSE)
> -#define _XSTATE_YMM 2
> -#define XSTATE_YMM (1ULL << _XSTATE_YMM)
> -#define _XSTATE_BNDREGS 3
> -#define XSTATE_BNDREGS (1ULL << _XSTATE_BNDREGS)
> -#define _XSTATE_BNDCSR 4
> -#define XSTATE_BNDCSR (1ULL << _XSTATE_BNDCSR)
> -#define _XSTATE_OPMASK 5
> -#define XSTATE_OPMASK (1ULL << _XSTATE_OPMASK)
> -#define _XSTATE_ZMM 6
> -#define XSTATE_ZMM (1ULL << _XSTATE_ZMM)
> -#define _XSTATE_HI_ZMM 7
> -#define XSTATE_HI_ZMM (1ULL << _XSTATE_HI_ZMM)
> -#define _XSTATE_PKRU 9
> -#define XSTATE_PKRU (1ULL << _XSTATE_PKRU)
> -#define _XSTATE_LWP 62
> -#define XSTATE_LWP (1ULL << _XSTATE_LWP)
> -
> -#define XSTATE_FP_SSE (XSTATE_FP | XSTATE_SSE)
> -#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | \
> - XSTATE_ZMM | XSTATE_HI_ZMM | XSTATE_NONLAZY)
> +#define XSTATE_FP_SSE (X86_XCR0_FP | X86_XCR0_SSE)
> +#define XCNTXT_MASK (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | \
> + X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \
> + XSTATE_NONLAZY)
>
> #define XSTATE_ALL (~(1ULL << 63))
> -#define XSTATE_NONLAZY (XSTATE_LWP | XSTATE_BNDREGS | XSTATE_BNDCSR | \
> - XSTATE_PKRU)
> +#define XSTATE_NONLAZY (X86_XCR0_LWP | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | \
> + X86_XCR0_PKRU)
> #define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY)
> #define XSTATE_XSAVES_ONLY 0
> #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2018-02-28 13:09 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-02-28 12:50 [PATCH v4 00/20] x86: emulator enhancements Jan Beulich
2018-02-28 12:57 ` [PATCH v4 01/20] x86emul: extend vbroadcasts{s, d} to AVX2 Jan Beulich
2018-03-02 19:08 ` Andrew Cooper
2018-02-28 12:58 ` [PATCH v4 02/20] x86emul: support most remaining AVX2 insns Jan Beulich
2018-03-05 13:39 ` Andrew Cooper
2018-03-05 14:50 ` Jan Beulich
2018-02-28 12:58 ` [PATCH v4 03/20] x86emul: support AVX2 gather insns Jan Beulich
2018-03-05 14:15 ` Andrew Cooper
2018-02-28 12:59 ` [PATCH v4 04/20] x86emul: support XOP insns Jan Beulich
2018-02-28 12:59 ` [PATCH v4 05/20] x86emul: support 3DNow! insns Jan Beulich
2018-03-05 14:55 ` Andrew Cooper
2018-02-28 13:00 ` [PATCH v4 06/20] x86emul: place test blobs in executable section Jan Beulich
2018-03-06 11:28 ` Andrew Cooper
2018-03-06 13:33 ` Jan Beulich
2018-02-28 13:00 ` [PATCH v4 07/20] x86: move and rename XSTATE_* Jan Beulich
2018-02-28 13:01 ` Jan Beulich
2018-02-28 13:06 ` Paul Durrant [this message]
2018-03-05 15:27 ` Andrew Cooper
2018-02-28 13:03 ` [PATCH v4 08/20] x86emul: abstract out XCRn accesses Jan Beulich
2018-03-05 15:56 ` Andrew Cooper
2018-03-08 9:33 ` Jan Beulich
2018-02-28 13:04 ` [PATCH v4 09/20] x86emul: adjust_bnd() should check XCR0 Jan Beulich
2018-02-28 13:04 ` [PATCH v4 10/20] x86emul: make all FPU emulation use the stub Jan Beulich
2018-02-28 13:05 ` [PATCH v4 11/20] x86/HVM: eliminate custom #MF/#XM handling Jan Beulich
2018-02-28 13:06 ` [PATCH v4 12/20] x86emul: support SWAPGS Jan Beulich
2018-03-05 15:58 ` Andrew Cooper
2018-02-28 13:08 ` [PATCH v4 13/20] x86emul: tell cmpxchg hook whether LOCK is in effect Jan Beulich
2018-02-28 13:08 ` [PATCH v4 14/20] x86/PV: convert page table emulation code from paddr_t to intpte_t Jan Beulich
2018-02-28 13:09 ` [PATCH v4 15/20] x86emul: correctly handle CMPXCHG* comparison failures Jan Beulich
2018-03-01 7:16 ` Tim Deegan
2018-02-28 13:10 ` [PATCH v4 16/20] x86emul: add read-modify-write hook Jan Beulich
2018-02-28 13:11 ` [PATCH v4 17/20] x86/HVM: do actual CMPXCHG in hvmemul_cmpxchg() Jan Beulich
2018-02-28 13:12 ` [PATCH v4 18/20] x86/HVM: make use of new read-modify-write emulator hook Jan Beulich
2018-02-28 13:13 ` [PATCH v4 19/20] x86/shadow: fully move unmap-dest into common code Jan Beulich
2018-02-28 13:13 ` [PATCH v4 20/20] x86/shadow: fold sh_x86_emulate_{write, cmpxchg}() into their only callers Jan Beulich
2018-03-02 9:21 ` [PATCH v4 00/20] x86: emulator enhancements Razvan Cojocaru
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=9f910d8a32b94fb4972b9937cfda4d76@AMSPEX02CL03.citrite.net \
--to=paul.durrant@citrix.com \
--cc=Andrew.Cooper3@citrix.com \
--cc=George.Dunlap@citrix.com \
--cc=JBeulich@suse.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).