From: George Dunlap
Subject: Re: [PATCH v2 07/16] xen: sched: rename v->cpu_affinity into v->cpu_hard_affinity
Date: Thu, 14 Nov 2013 14:17:12 +0000
Message-ID: <5284DB68.5090502@eu.citrix.com>
References: <20131113190852.18086.5437.stgit@Solace> <20131113191202.18086.49164.stgit@Solace>
In-Reply-To: <20131113191202.18086.49164.stgit@Solace>
To: Dario Faggioli, xen-devel@lists.xen.org
Cc: Marcus Granado, Keir Fraser, Ian Campbell, Li Yechen, Andrew Cooper, Juergen Gross, Ian Jackson, Jan Beulich, Justin Weaver, Matt Wilson, Elena Ufimtseva
List-Id: xen-devel@lists.xenproject.org

On 13/11/13 19:12, Dario Faggioli wrote:
> in order to distinguish it from the cpu_soft_affinity introduced
> by the previous commit ("xen: sched: make space for
> cpu_soft_affinity"). This patch does not imply any functional
> change, it is basically the result of something like the following:
>
>  s/cpu_affinity/cpu_hard_affinity/g
>  s/cpu_affinity_tmp/cpu_hard_affinity_tmp/g
>  s/cpu_affinity_saved/cpu_hard_affinity_saved/g
>
> Signed-off-by: Dario Faggioli

Reviewed-by: George Dunlap

> ---
>  xen/arch/x86/traps.c      |   11 ++++++-----
>  xen/common/domain.c       |   22 +++++++++++-----------
>  xen/common/domctl.c       |    2 +-
>  xen/common/keyhandler.c   |    2 +-
>  xen/common/sched_credit.c |   12 ++++++------
>  xen/common/sched_sedf.c   |    2 +-
>  xen/common/schedule.c     |   21 +++++++++++----------
>  xen/common/wait.c         |    4 ++--
>  xen/include/xen/sched.h   |    8 ++++----
>  9 files changed, 43 insertions(+), 41 deletions(-)
>
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index e5b3585..4279cad 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -3083,7 +3083,8 @@ static void nmi_mce_softirq(void)
>
>      /* Set the tmp value unconditionally, so that
>       * the check in the iret hypercall works. */
> -    cpumask_copy(st->vcpu->cpu_affinity_tmp, st->vcpu->cpu_affinity);
> +    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
> +                 st->vcpu->cpu_hard_affinity);
>
>      if ((cpu != st->processor)
>         || (st->processor != st->vcpu->processor))
> @@ -3118,11 +3119,11 @@ void async_exception_cleanup(struct vcpu *curr)
>          return;
>
>      /* Restore affinity. */
> -    if ( !cpumask_empty(curr->cpu_affinity_tmp) &&
> -         !cpumask_equal(curr->cpu_affinity_tmp, curr->cpu_affinity) )
> +    if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
> +         !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
>      {
> -        vcpu_set_affinity(curr, curr->cpu_affinity_tmp);
> -        cpumask_clear(curr->cpu_affinity_tmp);
> +        vcpu_set_affinity(curr, curr->cpu_hard_affinity_tmp);
> +        cpumask_clear(curr->cpu_hard_affinity_tmp);
>      }
>
>      if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index c33b876..2916490 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -125,9 +125,9 @@ struct vcpu *alloc_vcpu(
>
>      tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
>
> -    if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
> -         !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
> -         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
> +    if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
> +         !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
> +         !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
>           !zalloc_cpumask_var(&v->cpu_soft_affinity) ||
>           !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
>          goto fail_free;
> @@ -157,9 +157,9 @@ struct vcpu *alloc_vcpu(
>   fail_wq:
>          destroy_waitqueue_vcpu(v);
>   fail_free:
> -        free_cpumask_var(v->cpu_affinity);
> -        free_cpumask_var(v->cpu_affinity_tmp);
> -        free_cpumask_var(v->cpu_affinity_saved);
> +        free_cpumask_var(v->cpu_hard_affinity);
> +        free_cpumask_var(v->cpu_hard_affinity_tmp);
> +        free_cpumask_var(v->cpu_hard_affinity_saved);
>          free_cpumask_var(v->cpu_soft_affinity);
>          free_cpumask_var(v->vcpu_dirty_cpumask);
>          free_vcpu_struct(v);
> @@ -373,7 +373,7 @@ void domain_update_node_affinity(struct domain *d)
>
>      for_each_vcpu ( d, v )
>      {
> -        cpumask_and(online_affinity, v->cpu_affinity, online);
> +        cpumask_and(online_affinity, v->cpu_hard_affinity, online);
>          cpumask_or(cpumask, cpumask, online_affinity);
>      }
>
> @@ -736,9 +736,9 @@ static void complete_domain_destroy(struct rcu_head *head)
>      for ( i = d->max_vcpus - 1; i >= 0; i-- )
>          if ( (v = d->vcpu[i]) != NULL )
>          {
> -            free_cpumask_var(v->cpu_affinity);
> -            free_cpumask_var(v->cpu_affinity_tmp);
> -            free_cpumask_var(v->cpu_affinity_saved);
> +            free_cpumask_var(v->cpu_hard_affinity);
> +            free_cpumask_var(v->cpu_hard_affinity_tmp);
> +            free_cpumask_var(v->cpu_hard_affinity_saved);
>              free_cpumask_var(v->cpu_soft_affinity);
>              free_cpumask_var(v->vcpu_dirty_cpumask);
>              free_vcpu_struct(v);
> @@ -878,7 +878,7 @@ int vcpu_reset(struct vcpu *v)
>      v->async_exception_mask = 0;
>      memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
>  #endif
> -    cpumask_clear(v->cpu_affinity_tmp);
> +    cpumask_clear(v->cpu_hard_affinity_tmp);
>      clear_bit(_VPF_blocked, &v->pause_flags);
>      clear_bit(_VPF_in_reset, &v->pause_flags);
>
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index 904d27b..5e0ac5c 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -629,7 +629,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>          else
>          {
>              ret = cpumask_to_xenctl_bitmap(
> -                &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
> +                &op->u.vcpuaffinity.cpumap, v->cpu_hard_affinity);
>          }
>      }
>      break;
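(An aside, mostly for anyone following along at home: the alloc_vcpu() hunks above are the only ones where the rename touches more than one line of logic, and they rely on the usual all-or-nothing pattern: every mask is allocated up front, and the single fail_free path frees the whole set, which is safe because struct vcpu starts out zeroed and, if memory serves, free_cpumask_var() is a no-op on a never-allocated mask. A minimal userspace sketch of the same shape, with calloc()/free() standing in for zalloc_cpumask_var()/free_cpumask_var(), and all the names being mine, not Xen's:

#include <stdlib.h>

/* Toy version of the alloc_vcpu() error handling: allocate every
 * affinity mask, and on any failure free the whole set.  Zeroing the
 * struct first is what makes the blanket free safe, just as the
 * zalloc'ed struct vcpu does for the real code. */
struct toy_vcpu {
    unsigned long *hard, *hard_tmp, *hard_saved, *soft;
};

static int alloc_affinity_masks(struct toy_vcpu *v, size_t nr_words)
{
    *v = (struct toy_vcpu){ 0 };

    if ( !(v->hard       = calloc(nr_words, sizeof(*v->hard))) ||
         !(v->hard_tmp   = calloc(nr_words, sizeof(*v->hard_tmp))) ||
         !(v->hard_saved = calloc(nr_words, sizeof(*v->hard_saved))) ||
         !(v->soft       = calloc(nr_words, sizeof(*v->soft))) )
    {
        free(v->hard);
        free(v->hard_tmp);
        free(v->hard_saved);
        free(v->soft);
        return -1;
    }
    return 0;
}

None of which this patch changes, of course; the rename is purely mechanical here too.)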
> diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
> index 33c9a37..42fb418 100644
> --- a/xen/common/keyhandler.c
> +++ b/xen/common/keyhandler.c
> @@ -296,7 +296,7 @@ static void dump_domains(unsigned char key)
>                     !vcpu_event_delivery_is_enabled(v));
>              cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
>              printk("dirty_cpus=%s ", tmpstr);
> -            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
> +            cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_hard_affinity);
>              printk("cpu_affinity=%s\n", tmpstr);
>              cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_soft_affinity);
>              printk("cpu_soft_affinity=%s\n", tmpstr);
> diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
> index 28dafcf..398b095 100644
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -332,13 +332,13 @@ csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
>      if ( step == CSCHED_BALANCE_NODE_AFFINITY )
>      {
>          cpumask_and(mask, CSCHED_DOM(vc->domain)->node_affinity_cpumask,
> -                    vc->cpu_affinity);
> +                    vc->cpu_hard_affinity);
>
>          if ( unlikely(cpumask_empty(mask)) )
> -            cpumask_copy(mask, vc->cpu_affinity);
> +            cpumask_copy(mask, vc->cpu_hard_affinity);
>      }
>      else /* step == CSCHED_BALANCE_CPU_AFFINITY */
> -        cpumask_copy(mask, vc->cpu_affinity);
> +        cpumask_copy(mask, vc->cpu_hard_affinity);
>  }
>
>  static void burn_credits(struct csched_vcpu *svc, s_time_t now)
> @@ -407,7 +407,7 @@ __runq_tickle(unsigned int cpu, struct csched_vcpu *new)
>
>              if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
>                   && !__vcpu_has_node_affinity(new->vcpu,
> -                                              new->vcpu->cpu_affinity) )
> +                                              new->vcpu->cpu_hard_affinity) )
>                  continue;
>
>              /* Are there idlers suitable for new (for this balance step)? */
> @@ -642,7 +642,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
>
>      /* Store in cpus the mask of online cpus on which the domain can run */
>      online = cpupool_scheduler_cpumask(vc->domain->cpupool);
> -    cpumask_and(&cpus, vc->cpu_affinity, online);
> +    cpumask_and(&cpus, vc->cpu_hard_affinity, online);
>
>      for_each_csched_balance_step( balance_step )
>      {
> @@ -1487,7 +1487,7 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
>               * or counter.
>               */
>              if ( balance_step == CSCHED_BALANCE_NODE_AFFINITY
> -                 && !__vcpu_has_node_affinity(vc, vc->cpu_affinity) )
> +                 && !__vcpu_has_node_affinity(vc, vc->cpu_hard_affinity) )
>                  continue;
>
>              csched_balance_cpumask(vc, balance_step, csched_balance_mask);
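The credit hunks are equally mechanical, but they do highlight the invariant the balancing steps rely on, which is worth spelling out now that the mask has "hard" in its name: the node-affinity step only ever narrows cpu_hard_affinity, and falls back to the plain hard mask when the intersection is empty, so the mask used at each step is non-empty whenever the hard mask itself is. A self-contained toy of the two-step search, using plain bitmasks instead of cpumask_t (names mine, not Xen's):

#include <stdio.h>

typedef unsigned long mask_t;   /* stand-in for cpumask_t */

enum { STEP_NODE_AFFINITY, STEP_CPU_AFFINITY, NR_STEPS };

/* Same shape as csched_balance_cpumask(): step 0 tries the
 * node-affinity subset of the hard mask, step 1 the full hard mask. */
static mask_t balance_mask(int step, mask_t node_aff, mask_t hard)
{
    if ( step == STEP_NODE_AFFINITY )
    {
        mask_t m = node_aff & hard;
        return m ? m : hard;    /* fall back rather than go empty */
    }
    return hard;
}

int main(void)
{
    mask_t node_aff = 0x3, hard = 0xc;  /* deliberately disjoint */
    int step;

    /* Prints 0xc twice: step 0 falls back, step 1 uses hard directly. */
    for ( step = 0; step < NR_STEPS; step++ )
        printf("step %d: %#lx\n", step, balance_mask(step, node_aff, hard));
    return 0;
}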
> diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
> index 7c24171..c219aed 100644
> --- a/xen/common/sched_sedf.c
> +++ b/xen/common/sched_sedf.c
> @@ -396,7 +396,7 @@ static int sedf_pick_cpu(const struct scheduler *ops, struct vcpu *v)
>      cpumask_t *online;
>
>      online = cpupool_scheduler_cpumask(v->domain->cpupool);
> -    cpumask_and(&online_affinity, v->cpu_affinity, online);
> +    cpumask_and(&online_affinity, v->cpu_hard_affinity, online);
>      return cpumask_cycle(v->vcpu_id % cpumask_weight(&online_affinity) - 1,
>                           &online_affinity);
>  }
> diff --git a/xen/common/schedule.c b/xen/common/schedule.c
> index 5731622..28099d6 100644
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -194,9 +194,9 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
>       */
>      v->processor = processor;
>      if ( is_idle_domain(d) || d->is_pinned )
> -        cpumask_copy(v->cpu_affinity, cpumask_of(processor));
> +        cpumask_copy(v->cpu_hard_affinity, cpumask_of(processor));
>      else
> -        cpumask_setall(v->cpu_affinity);
> +        cpumask_setall(v->cpu_hard_affinity);
>
>      cpumask_setall(v->cpu_soft_affinity);
>
> @@ -287,7 +287,7 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
>          migrate_timer(&v->singleshot_timer, new_p);
>          migrate_timer(&v->poll_timer, new_p);
>
> -        cpumask_setall(v->cpu_affinity);
> +        cpumask_setall(v->cpu_hard_affinity);
>
>          lock = vcpu_schedule_lock_irq(v);
>          v->processor = new_p;
> @@ -459,7 +459,7 @@ static void vcpu_migrate(struct vcpu *v)
>           */
>          if ( pick_called &&
>               (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
> -             cpumask_test_cpu(new_cpu, v->cpu_affinity) &&
> +             cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
>               cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
>              break;
>
> @@ -563,7 +563,7 @@ void restore_vcpu_affinity(struct domain *d)
>          {
>              printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
>                     d->domain_id, v->vcpu_id);
> -            cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
> +            cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
>              v->affinity_broken = 0;
>          }
>
> @@ -606,20 +606,21 @@ int cpu_disable_scheduler(unsigned int cpu)
>              unsigned long flags;
>              spinlock_t *lock = vcpu_schedule_lock_irqsave(v, &flags);
>
> -            cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
> +            cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
>              if ( cpumask_empty(&online_affinity) &&
> -                 cpumask_test_cpu(cpu, v->cpu_affinity) )
> +                 cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
>              {
>                  printk(XENLOG_DEBUG "Breaking affinity for d%dv%d\n",
>                         d->domain_id, v->vcpu_id);
>
>                  if (system_state == SYS_STATE_suspend)
>                  {
> -                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
> +                    cpumask_copy(v->cpu_hard_affinity_saved,
> +                                 v->cpu_hard_affinity);
>                      v->affinity_broken = 1;
>                  }
>
> -                cpumask_setall(v->cpu_affinity);
> +                cpumask_setall(v->cpu_hard_affinity);
>              }
>
>              if ( v->processor == cpu )
> @@ -667,7 +668,7 @@ int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity)
>
>      lock = vcpu_schedule_lock_irq(v);
>
> -    cpumask_copy(v->cpu_affinity, affinity);
> +    cpumask_copy(v->cpu_hard_affinity, affinity);
>
>      /* Always ask the scheduler to re-evaluate placement
>       * when changing the affinity */
> diff --git a/xen/common/wait.c b/xen/common/wait.c
> index 3c9366c..3f6ff41 100644
> --- a/xen/common/wait.c
> +++ b/xen/common/wait.c
> @@ -134,7 +134,7 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
>
>      /* Save current VCPU affinity; force wakeup on *this* CPU only. */
>      wqv->wakeup_cpu = smp_processor_id();
> -    cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
> +    cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
>      if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
>      {
>          gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
> @@ -183,7 +183,7 @@ void check_wakeup_from_wait(void)
>      {
>          /* Re-set VCPU affinity and re-enter the scheduler. */
>          struct vcpu *curr = current;
> -        cpumask_copy(&wqv->saved_affinity, curr->cpu_affinity);
> +        cpumask_copy(&wqv->saved_affinity, curr->cpu_hard_affinity);
>          if ( vcpu_set_affinity(curr, cpumask_of(wqv->wakeup_cpu)) )
>          {
>              gdprintk(XENLOG_ERR, "Unable to set vcpu affinity\n");
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 7e00caf..3575312 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -192,11 +192,11 @@ struct vcpu
>      spinlock_t       virq_lock;
>
>      /* Bitmask of CPUs on which this VCPU may run. */
> -    cpumask_var_t    cpu_affinity;
> +    cpumask_var_t    cpu_hard_affinity;
>      /* Used to change affinity temporarily. */
> -    cpumask_var_t    cpu_affinity_tmp;
> +    cpumask_var_t    cpu_hard_affinity_tmp;
>      /* Used to restore affinity across S3. */
> -    cpumask_var_t    cpu_affinity_saved;
> +    cpumask_var_t    cpu_hard_affinity_saved;
>
>      /* Bitmask of CPUs on which this VCPU prefers to run. */
>      cpumask_var_t    cpu_soft_affinity;
> @@ -795,7 +795,7 @@ void watchdog_domain_destroy(struct domain *d);
>  #define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
>  #define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))
>  #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
> -                           cpumask_weight((v)->cpu_affinity) == 1)
> +                           cpumask_weight((v)->cpu_hard_affinity) == 1)
>  #ifdef HAS_PASSTHROUGH
>  #define need_iommu(d)    ((d)->need_iommu)
>  #else
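One closing remark on the sched.h hunk, where the two comments now read nicely in opposition ("may run" vs. "prefers to run"): for reviewers of the later patches in the series, my mental model of how the two masks are meant to combine is the sketch below. To be clear, this is my reading of the intended semantics, not code from the series; plain bitmasks again, names mine:

typedef unsigned long mask_t;

/* Hard affinity is a constraint, soft affinity a preference: candidates
 * are (hard & online), ideally narrowed by soft, with the by-now
 * familiar fallback when the preference cannot be satisfied. */
static mask_t candidate_cpus(mask_t hard, mask_t soft, mask_t online)
{
    mask_t runnable  = hard & online;   /* where the vcpu may run */
    mask_t preferred = runnable & soft; /* where it prefers to run */

    return preferred ? preferred : runnable;
}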