From: Juergen Gross
To: xen-devel@lists.xensource.com
Subject: [PATCH 3 of 3] reflect cpupool in numa node affinity
Date: Tue, 24 Jan 2012 06:54:12 +0100
Message-ID: <574dba7570ff785d3351.1327384452@nehalem1>

In order to prefer node-local memory for a domain, the NUMA node locality
info must be built according to the CPUs belonging to the cpupool of the
domain.

Signed-off-by: juergen.gross@ts.fujitsu.com

3 files changed, 27 insertions(+), 8 deletions(-)
xen/common/cpupool.c  |   9 +++++++++
xen/common/domain.c   |  16 +++++++++++++++-
xen/common/schedule.c |  10 +++-------

# HG changeset patch
# User Juergen Gross
# Date 1327384424 -3600
# Node ID 574dba7570ff785d3351051a4a0a724c63066f57
# Parent  08232960ff4bed750d26e5f1ff53972fee9e0130
reflect cpupool in numa node affinity

In order to prefer node-local memory for a domain, the NUMA node locality
info must be built according to the CPUs belonging to the cpupool of the
domain.

Signed-off-by: juergen.gross@ts.fujitsu.com

diff -r 08232960ff4b -r 574dba7570ff xen/common/cpupool.c
--- a/xen/common/cpupool.c	Tue Jan 24 06:53:30 2012 +0100
+++ b/xen/common/cpupool.c	Tue Jan 24 06:53:44 2012 +0100
@@ -220,6 +220,7 @@ static int cpupool_assign_cpu_locked(str
 {
     int ret;
     struct cpupool *old;
+    struct domain *d;

     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
@@ -240,6 +241,14 @@ static int cpupool_assign_cpu_locked(str
         cpupool_cpu_moving = NULL;
     }
     cpumask_set_cpu(cpu, c->cpu_valid);
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+    {
+        domain_update_node_affinity(d);
+    }
+    rcu_read_unlock(&domlist_read_lock);
+
     return 0;
 }

diff -r 08232960ff4b -r 574dba7570ff xen/common/domain.c
--- a/xen/common/domain.c	Tue Jan 24 06:53:30 2012 +0100
+++ b/xen/common/domain.c	Tue Jan 24 06:53:44 2012 +0100
@@ -11,6 +11,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -334,17 +335,29 @@ void domain_update_node_affinity(struct
 void domain_update_node_affinity(struct domain *d)
 {
     cpumask_var_t cpumask;
+    cpumask_var_t online_affinity;
+    const cpumask_t *online;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;

     if ( !zalloc_cpumask_var(&cpumask) )
         return;
+    if ( !alloc_cpumask_var(&online_affinity) )
+    {
+        free_cpumask_var(cpumask);
+        return;
+    }
+
+    online = cpupool_online_cpumask(d->cpupool);

     spin_lock(&d->node_affinity_lock);

     for_each_vcpu ( d, v )
-        cpumask_or(cpumask, cpumask, v->cpu_affinity);
+    {
+        cpumask_and(online_affinity, v->cpu_affinity, online);
+        cpumask_or(cpumask, cpumask, online_affinity);
+    }

     for_each_online_node ( node )
         if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
@@ -353,6 +366,7 @@ void domain_update_node_affinity(struct
     d->node_affinity = nodemask;
     spin_unlock(&d->node_affinity_lock);

+    free_cpumask_var(online_affinity);
     free_cpumask_var(cpumask);
 }
diff -r 08232960ff4b -r 574dba7570ff xen/common/schedule.c
--- a/xen/common/schedule.c	Tue Jan 24 06:53:30 2012 +0100
+++ b/xen/common/schedule.c	Tue Jan 24 06:53:44 2012 +0100
@@ -280,11 +280,12 @@ int sched_move_domain(struct domain *d,
         SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }

-    domain_update_node_affinity(d);
     d->cpupool = c;
     SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
     d->sched_priv = domdata;
+
+    domain_update_node_affinity(d);

     domain_unpause(d);
@@ -535,7 +536,6 @@ int cpu_disable_scheduler(unsigned int c
     struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
-    bool_t affinity_broken;

     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
@@ -543,8 +543,6 @@ int cpu_disable_scheduler(unsigned int c
     for_each_domain_in_cpupool ( d, c )
     {
-        affinity_broken = 0;
-
         for_each_vcpu ( d, v )
         {
             vcpu_schedule_lock_irq(v);
@@ -556,7 +554,6 @@ int cpu_disable_scheduler(unsigned int c
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
                 cpumask_setall(v->cpu_affinity);
-                affinity_broken = 1;
             }

             if ( v->processor == cpu )
@@ -580,8 +577,7 @@ int cpu_disable_scheduler(unsigned int c
                 ret = -EAGAIN;
         }

-        if ( affinity_broken )
-            domain_update_node_affinity(d);
+        domain_update_node_affinity(d);
     }

     return ret;
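
For illustration only, here is a minimal, self-contained C sketch of the
computation the reworked domain_update_node_affinity() now performs: each
vcpu affinity is first restricted to the CPUs that are online in the domain's
cpupool, and only the NUMA nodes whose CPUs intersect the union of those
restricted masks end up in the domain's node affinity. The plain 64-bit
bitmaps, the node_to_cpus[] table and the example topology are simplified
stand-ins for Xen's cpumask_t/nodemask_t and node_to_cpumask(); this is not
hypervisor code.

/*
 * Standalone illustration only (not Xen code): 64-bit bitmaps stand in for
 * cpumask_t/nodemask_t, and node_to_cpus[] stands in for node_to_cpumask().
 */
#include <stdint.h>
#include <stdio.h>

#define NR_NODES 2

/* Example topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const uint64_t node_to_cpus[NR_NODES] = { 0x0fULL, 0xf0ULL };

/*
 * Compute a domain's node affinity: union the vcpu affinities, each first
 * restricted to the CPUs online in the domain's cpupool, then select every
 * node whose CPUs intersect that union.
 */
static uint64_t node_affinity(const uint64_t *vcpu_affinity,
                              unsigned int nr_vcpus, uint64_t pool_online)
{
    uint64_t cpus = 0, nodes = 0;
    unsigned int i, node;

    for ( i = 0; i < nr_vcpus; i++ )
        cpus |= vcpu_affinity[i] & pool_online;   /* cpumask_and + cpumask_or */

    for ( node = 0; node < NR_NODES; node++ )
        if ( node_to_cpus[node] & cpus )          /* cpumask_intersects */
            nodes |= 1ULL << node;

    return nodes;
}

int main(void)
{
    /* Two vcpus allowed on all CPUs, but the cpupool only holds CPUs 0-3. */
    uint64_t vcpu_affinity[2] = { ~0ULL, ~0ULL };
    uint64_t pool_online = 0x0fULL;

    printf("node affinity: %#llx\n",
           (unsigned long long)node_affinity(vcpu_affinity, 2, pool_online));
    return 0;
}

Run as-is, the sketch reports a node affinity of 0x1: both vcpus may run on
every CPU, but because the cpupool only contains CPUs 0-3 (all on node 0),
memory should preferably be allocated from node 0 only, which is the
behaviour this patch makes the hypervisor reflect.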