From: George Dunlap <george.dunlap@eu.citrix.com>
To: Dario Faggioli <dario.faggioli@citrix.com>, xen-devel@lists.xen.org
Cc: Marcus Granado <Marcus.Granado@eu.citrix.com>,
	Keir Fraser <keir@xen.org>,
	Ian Campbell <Ian.Campbell@citrix.com>,
	Li Yechen <lccycc123@gmail.com>,
	Andrew Cooper <Andrew.Cooper3@citrix.com>,
	Juergen Gross <juergen.gross@ts.fujitsu.com>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	Jan Beulich <JBeulich@suse.com>,
	Justin Weaver <jtweaver@hawaii.edu>, Matt Wilson <msw@amazon.com>,
	Elena Ufimtseva <ufimtseva@gmail.com>
Subject: Re: [PATCH v2 07/16] xen: sched: rename v->cpu_affinity into v->cpu_hard_affinity
Date: Thu, 14 Nov 2013 14:17:12 +0000
Message-ID: <5284DB68.5090502@eu.citrix.com>
In-Reply-To: <20131113191202.18086.49164.stgit@Solace>

On 13/11/13 19:12, Dario Faggioli wrote:
> in order to distinguish it from the cpu_soft_affinity introduced
> by the previous commit ("xen: sched: make space for
> cpu_soft_affinity"). This patch does not imply any functional
> change; it is basically the result of something like the following:
>
>   s/cpu_affinity/cpu_hard_affinity/g
>   s/cpu_affinity_tmp/cpu_hard_affinity_tmp/g
>   s/cpu_affinity_saved/cpu_hard_affinity_saved/g
>
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>

Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>
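
As context for anyone reading the archive (this is not part of the patch
itself): the rename is essentially the commit message's substitutions
applied over the tree. A rough sketch of how that could be done, assuming
GNU sed and a git checkout, and not necessarily the exact command the
author ran, would be:

    # in-place rename of the affinity field identifiers (sketch only)
    git grep -l cpu_affinity -- xen/ | xargs sed -i \
        -e 's/cpu_affinity_tmp/cpu_hard_affinity_tmp/g' \
        -e 's/cpu_affinity_saved/cpu_hard_affinity_saved/g' \
        -e 's/cpu_affinity/cpu_hard_affinity/g'

Note that the actual patch is not a blind tree-wide replace: for
instance, the "cpu_affinity=%s" string printed by keyhandler.c is
deliberately left unchanged below, so only the C identifiers are renamed.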

> ---
>   xen/arch/x86/traps.c      |   11 ++++++-----
>   xen/common/domain.c       |   22 +++++++++++-----------
>   xen/common/domctl.c       |    2 +-
>   xen/common/keyhandler.c   |    2 +-
>   xen/common/sched_credit.c |   12 ++++++------
>   xen/common/sched_sedf.c   |    2 +-
>   xen/common/schedule.c     |   21 +++++++++++----------
>   xen/common/wait.c         |    4 ++--
>   xen/include/xen/sched.h   |    8 ++++----
>   9 files changed, 43 insertions(+), 41 deletions(-)
>
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index e5b3585..4279cad 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -3083,7 +3083,8 @@ static void nmi_mce_softirq(void)
>   
>       /* Set the tmp value unconditionally, so that
>        * the check in the iret hypercall works. */
> -    cpumask_copy(st->vcpu->cpu_affinity_tmp, st->vcpu->cpu_affinity);
> +    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
> +                 st->vcpu->cpu_hard_affinity);
>   
>       if ((cpu != st->processor)
>          || (st->processor != st->vcpu->processor))
> @@ -3118,11 +3119,11 @@ void async_exception_cleanup(struct vcpu *curr)
>           return;
>   
>       /* Restore affinity.  */
> -    if ( !cpumask_empty(curr->cpu_affinity_tmp) &&
> -         !cpumask_equal(curr->cpu_affinity_tmp, curr->cpu_affinity) )
> +    if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
> +         !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
>       {
> -        vcpu_set_affinity(curr, curr->cpu_affinity_tmp);
> -        cpumask_clear(curr->cpu_affinity_tmp);
> +        vcpu_set_affinity(curr, curr->cpu_hard_affinity_tmp);
> +        cpumask_clear(curr->cpu_hard_affinity_tmp);
>       }
>   
>       if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index c33b876..2916490 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -125,9 +125,9 @@ struct vcpu *alloc_vcpu(
>   
>       tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
>   
> -    if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
> -         !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
> -         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
> +    if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
> +         !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
> +         !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
>            !zalloc_cpumask_var(&v->cpu_soft_affinity) ||
>            !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
>           goto fail_free;
> @@ -157,9 +157,9 @@ struct vcpu *alloc_vcpu(
>    fail_wq:
>           destroy_waitqueue_vcpu(v);
>    fail_free:
> -        free_cpumask_var(v->cpu_affinity);
> -        free_cpumask_var(v->cpu_affinity_tmp);
> -        free_cpumask_var(v->cpu_affinity_saved);
> +        free_cpumask_var(v->cpu_hard_affinity);
> +        free_cpumask_var(v->cpu_hard_affinity_tmp);
> +        free_cpumask_var(v->cpu_hard_affinity_saved);
>           free_cpumask_var(v->cpu_soft_affinity);
>           free_cpumask_var(v->vcpu_dirty_cpumask);
>           free_vcpu_struct(v);
> @@ -373,7 +373,7 @@ void domain_update_node_affinity(struct domain *d)
>   
>       for_each_vcpu ( d, v )
>       {
> -        cpumask_and(online_affinity, v->cpu_affinity, online);
> +        cpumask_and(online_affinity, v->cpu_hard_affinity, online);
>           cpumask_or(cpumask, cpumask, online_affinity);
>       }
>   
> @@ -736,9 +736,9 @@ static void complete_domain_destroy(struct rcu_head *head)
>       for ( i = d->max_vcpus - 1; i >= 0; i-- )
>           if ( (v = d->vcpu[i]) != NULL )
>           {
> -            free_cpumask_var(v->cpu_affinity);
> -            free_cpumask_var(v->cpu_affinity_tmp);
> -            free_cpumask_var(v->cpu_affinity_saved);
> +            free_cpumask_var(v->cpu_hard_affinity);
> +            free_cpumask_var(v->cpu_hard_affinity_tmp);
> +            free_cpumask_var(v->cpu_hard_affinity_saved);
>               free_cpumask_var(v->cpu_soft_affinity);
>               free_cpumask_var(v->vcpu_dirty_cpumask);
>               free_vcpu_struct(v);
> @@ -878,7 +878,7 @@ int vcpu_reset(struct vcpu *v)
>       v->async_exception_mask = 0;
>       memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
>   #endif
> -    cpumask_clear(v->cpu_affinity_tmp);
> +    cpumask_clear(v->cpu_hard_affinity_tmp);
>       clear_bit(_VPF_blocked, &v->pause_flags);
>       clear_bit(_VPF_in_reset, &v->pause_flags);
>   
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index 904d27b..5e0ac5c 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -629,7 +629,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>           else
>           {
>               ret = cpumask_to_xenctl_bitmap(
> -                &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
> +                &op->u.vcpuaffinity.cpumap, v->cpu_hard_affinity);
>           }
>       }
>       break;
> diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
> index 33c9a37..42fb418 100644
> --- a/xen/common/keyhandler.c
> +++ b/xen/common/keyhandler.c
> @@ -296,7 +296,7 @@ static void dump_domains(unsigned char key)
>                      !vcpu_event_delivery_is_enabled(v));
>               cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
>               printk("dirty_cpus=%s ", tmpstr);
> -            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
> +            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
>               printk("cpu_affinity=%s\n", tmpstr);
>               cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
>               printk("cpu_soft_affinity=%s\n", tmpstr);
> diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
> index 28dafcf..398b095 100644
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -332,13 +332,13 @@ csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
>       if ( step == CSCHED_BALANCE_NODE_AFFINITY )
>       {
>           cpumask_and(mask, CSCHED_DOM(vc->domain)->node_affinity_cpumask,
> -                    vc->cpu_affinity);
> +                    vc->cpu_hard_affinity);
>   
>           if ( unlikely(cpumask_empty(mask)) )
> -            cpumask_copy(mask, vc->cpu_affinity);
> +            cpumask_copy(mask, vc->cpu_hard_affinity);
>       }
>       else /* step == CSCHED_BALANCE_CPU_AFFINITY */
> -        cpumask_copy(mask, vc->cpu_affinity);
> +        cpumask_copy(mask, vc->cpu_hard_affinity);
>   }
>   
>   static void burn_credits(struct csched_vcpu *svc, s_time_t now)
> @@ -407,7 +407,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
>   
>               if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
>                    && !__vcpu_has_node_affinity(new->vcpu,
> -                                              new->vcpu->cpu_affinity) )
> +                                              new->vcpu->cpu_hard_affinity) )
>                   continue;
>   
>               /* Are there idlers suitable for new (for this balance step)? */
> @@ -642,7 +642,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
>   
>       /* Store in cpus the mask of online cpus on which the domain can run */
>       online = cpupool_scheduler_cpumask(vc->domain->cpupool);
> -    cpumask_and(&cpus, vc->cpu_affinity, online);
> +    cpumask_and(&cpus, vc->cpu_hard_affinity, online);
>   
>       for_each_csched_balance_step( balance_step )
>       {
> @@ -1487,7 +1487,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
>                * or counter.
>                */
>               if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
> -                 && !__vcpu_has_node_affinity(vc, vc->cpu_affinity) )
> +                 && !__vcpu_has_node_affinity(vc, vc->cpu_hard_affinity) )
>                   continue;
>   
>               csched_balance_cpumask(vc, balance_step, csched_balance_mask);
> diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
> index 7c24171..c219aed 100644
> --- a/xen/common/sched_sedf.c
> +++ b/xen/common/sched_sedf.c
> @@ -396,7 +396,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
>       cpumask_t *online;
>   
>       online = cpupool_scheduler_cpumask(v->domain->cpupool);
> -    cpumask_and(&online_affinity, v->cpu_affinity, online);
> +    cpumask_and(&online_affinity, v->cpu_hard_affinity, online);
>       return cpumask_cycle(v->vcpu_id % cpumask_weight(&online_affinity) - 1,
>                            &online_affinity);
>   }
> diff --git a/xen/common/schedule.c b/xen/common/schedule.c
> index 5731622..28099d6 100644
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -194,9 +194,9 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
>        */
>       v->processor = processor;
>       if ( is_idle_domain(d) || d->is_pinned )
> -        cpumask_copy(v->cpu_affinity, cpumask_of(processor));
> +        cpumask_copy(v->cpu_hard_affinity, cpumask_of(processor));
>       else
> -        cpumask_setall(v->cpu_affinity);
> +        cpumask_setall(v->cpu_hard_affinity);
>   
>       cpumask_setall(v->cpu_soft_affinity);
>   
> @@ -287,7 +287,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
>           migrate_timer(&v->singleshot_timer, new_p);
>           migrate_timer(&v->poll_timer, new_p);
>   
> -        cpumask_setall(v->cpu_affinity);
> +        cpumask_setall(v->cpu_hard_affinity);
>   
>           lock = vcpu_schedule_lock_irq(v);
>           v->processor = new_p;
> @@ -459,7 +459,7 @@ static void vcpu_migrate(struct vcpu *v)
>                */
>               if ( pick_called &&
>                    (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
> -                 cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
> +                 cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
>                    cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
>                   break;
>   
> @@ -563,7 +563,7 @@ void restore_vcpu_affinity(struct domain *d)
>           {
>               printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
>                      d->domain_id, v->vcpu_id);
> -            cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
> +            cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
>               v->affinity_broken = 0;
>           }
>   
> @@ -606,20 +606,21 @@ int cpu_disable_scheduler(unsigned int cpu)
>               unsigned long flags;
>               spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
>   
> -            cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
> +            cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
>               if ( cpumask_empty(&online_affinity) &&
> -                 cpumask_test_cpu(cpu, v->cpu_affinity) )
> +                 cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
>               {
>                   printk(XENLOG_DEBUG "Breaking affinity for d%dv%d\n",
>                           d->domain_id, v->vcpu_id);
>   
>                   if (system_state == SYS_STATE_suspend)
>                   {
> -                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
> +                    cpumask_copy(v->cpu_hard_affinity_saved,
> +                                 v->cpu_hard_affinity);
>                       v->affinity_broken = 1;
>                   }
>   
> -                cpumask_setall(v->cpu_affinity);
> +                cpumask_setall(v->cpu_hard_affinity);
>               }
>   
>               if ( v->processor == cpu )
> @@ -667,7 +668,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
>   
>       lock = vcpu_schedule_lock_irq(v);
>   
> -    cpumask_copy(v->cpu_affinity, affinity);
> +    cpumask_copy(v->cpu_hard_affinity, affinity);
>   
>       /* Always ask the scheduler to re-evaluate placement
>        * when changing the affinity */
> diff --git a/xen/common/wait.c b/xen/common/wait.c
> index 3c9366c..3f6ff41 100644
> --- a/xen/common/wait.c
> +++ b/xen/common/wait.c
> @@ -134,7 +134,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
>   
>       /* Save current VCPU affinity; force wakeup on *this* CPU only. */
>       wqv->wakeup_cpu = smp_processor_id();
> -    cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
> +    cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
>       if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
>       {
>           gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
> @@ -183,7 +183,7 @@ void check_wakeup_from_wait(void)
>       {
>           /* Re-set VCPU affinity and re-enter the scheduler. */
>           struct vcpu *curr = current;
> -        cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
> +        cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
>           if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
>           {
>               gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 7e00caf..3575312 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -192,11 +192,11 @@ struct vcpu
>       spinlock_t       virq_lock;
>   
>       /* Bitmask of CPUs on which this VCPU may run. */
> -    cpumask_var_t    cpu_affinity;
> +    cpumask_var_t    cpu_hard_affinity;
>       /* Used to change affinity temporarily. */
> -    cpumask_var_t    cpu_affinity_tmp;
> +    cpumask_var_t    cpu_hard_affinity_tmp;
>       /* Used to restore affinity across S3. */
> -    cpumask_var_t    cpu_affinity_saved;
> +    cpumask_var_t    cpu_hard_affinity_saved;
>   
>       /* Bitmask of CPUs on which this VCPU prefers to run. */
>       cpumask_var_t    cpu_soft_affinity;
> @@ -795,7 +795,7 @@ void watchdog_domain_destroy(struct domain *d);
>   #define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
>   #define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))
>   #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
> -                           cpumask_weight((v)->cpu_affinity) == 1)
> +                           cpumask_weight((v)->cpu_hard_affinity) == 1)
>   #ifdef HAS_PASSTHROUGH
>   #define need_iommu(d)    ((d)->need_iommu)
>   #else
>

Thread overview: 62+ messages
2013-11-13 19:10 [PATCH v2 00/16] Implement vcpu soft affinity for credit1 Dario Faggioli
2013-11-13 19:11 ` [PATCH v2 01/16] xl: match output of vcpu-list with pinning syntax Dario Faggioli
2013-11-14 10:50   ` George Dunlap
2013-11-14 11:11     ` Dario Faggioli
2013-11-14 11:14       ` George Dunlap
2013-11-14 11:13     ` Dario Faggioli
2013-11-14 12:44     ` Ian Jackson
2013-11-14 14:19   ` Ian Jackson
2013-11-13 19:11 ` [PATCH v2 02/16] xl: allow for node-wise specification of vcpu pinning Dario Faggioli
2013-11-14 11:02   ` George Dunlap
2013-11-14 14:24   ` Ian Jackson
2013-11-14 14:37     ` Dario Faggioli
2013-11-13 19:11 ` [PATCH v2 03/16] xl: implement and enable dryrun mode for `xl vcpu-pin' Dario Faggioli
2013-11-13 19:11 ` [PATCH v2 04/16] xl: test script for the cpumap parser (for vCPU pinning) Dario Faggioli
2013-11-13 19:11 ` [PATCH v2 05/16] xen: fix leaking of v->cpu_affinity_saved Dario Faggioli
2013-11-14 11:11   ` George Dunlap
2013-11-14 11:58     ` Dario Faggioli
2013-11-14 14:25   ` Ian Jackson
2013-11-13 19:11 ` [PATCH v2 06/16] xen: sched: make space for cpu_soft_affinity Dario Faggioli
2013-11-14 15:03   ` George Dunlap
2013-11-14 16:14     ` Dario Faggioli
2013-11-15 10:07       ` George Dunlap
2013-11-13 19:12 ` [PATCH v2 07/16] xen: sched: rename v->cpu_affinity into v->cpu_hard_affinity Dario Faggioli
2013-11-14 14:17   ` George Dunlap [this message]
2013-11-13 19:12 ` [PATCH v2 08/16] xen: derive NUMA node affinity from hard and soft CPU affinity Dario Faggioli
2013-11-14 15:21   ` George Dunlap
2013-11-14 16:30     ` Dario Faggioli
2013-11-15 10:52       ` George Dunlap
2013-11-15 14:17         ` Dario Faggioli
2013-11-13 19:12 ` [PATCH v2 09/16] xen: sched: DOMCTL_*vcpuaffinity works with hard and soft affinity Dario Faggioli
2013-11-14 14:42   ` George Dunlap
2013-11-14 16:21     ` Dario Faggioli
2013-11-13 19:12 ` [PATCH v2 10/16] xen: sched: use soft-affinity instead of domain's node-affinity Dario Faggioli
2013-11-14 15:30   ` George Dunlap
2013-11-15  0:39     ` Dario Faggioli
2013-11-15 11:23       ` George Dunlap
2013-11-13 19:12 ` [PATCH v2 11/16] libxc: get and set soft and hard affinity Dario Faggioli
2013-11-14 14:58   ` Ian Jackson
2013-11-14 16:18     ` Dario Faggioli
2013-11-14 15:38   ` George Dunlap
2013-11-14 16:41     ` Dario Faggioli
2013-11-13 19:12 ` [PATCH v2 12/16] libxl: get and set soft affinity Dario Faggioli
2013-11-13 19:16   ` Dario Faggioli
2013-11-14 15:11   ` Ian Jackson
2013-11-14 15:55     ` George Dunlap
2013-11-14 16:25       ` Ian Jackson
2013-11-15  5:13         ` Dario Faggioli
2013-11-15 12:02         ` George Dunlap
2013-11-15 17:29           ` Dario Faggioli
2013-11-15  3:45     ` Dario Faggioli
2013-11-13 19:12 ` [PATCH v2 13/16] xl: show soft affinity in `xl vcpu-list' Dario Faggioli
2013-11-14 15:12   ` Ian Jackson
2013-11-13 19:13 ` [PATCH v2 14/16] xl: enable setting soft affinity Dario Faggioli
2013-11-13 19:13 ` [PATCH v2 15/16] xl: enable for specifying node-affinity in the config file Dario Faggioli
2013-11-14 15:14   ` Ian Jackson
2013-11-14 16:12     ` Dario Faggioli
2013-11-13 19:13 ` [PATCH v2 16/16] libxl: automatic NUMA placement affects soft affinity Dario Faggioli
2013-11-14 15:17   ` Ian Jackson
2013-11-14 16:11     ` Dario Faggioli
2013-11-14 16:03   ` George Dunlap
2013-11-14 16:48     ` Dario Faggioli
2013-11-14 17:49       ` George Dunlap
