* [PATCH] Reflect cpupool in numa node affinity
@ 2012-01-23  9:51 Juergen Gross
  2012-01-23 10:27 ` Jan Beulich
  2012-01-23 11:03 ` Ian Campbell
  0 siblings, 2 replies; 5+ messages in thread
From: Juergen Gross @ 2012-01-23  9:51 UTC (permalink / raw)
  To: xen-devel

[-- Attachment #1: Type: text/plain, Size: 371 bytes --]

In order to prefer node-local memory for a domain, the NUMA node locality
info must be built according to the CPUs belonging to the domain's cpupool.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
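
For reference, the core of the new computation in domain_update_node_affinity()
(see the xen/common/domain.c hunk below) boils down to the following simplified
sketch; locking and the recording of the result in d->node_affinity are
omitted, and the final node_set() step is the pre-existing code outside the
quoted hunk context:

    cpumask_t cpumask, online_affinity;
    cpumask_t *online;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    /* Without a cpupool, fall back to considering all online cpus. */
    online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;

    cpumask_clear(&cpumask);
    for_each_vcpu ( d, v )
    {
        /* Only cpus that are both in the vcpu's affinity and in the
         * domain's cpupool contribute to the node affinity. */
        cpumask_and(&online_affinity, v->cpu_affinity, online);
        cpumask_or(&cpumask, &cpumask, &online_affinity);
    }

    /* A node qualifies iff at least one of its cpus survived the
     * intersection above. */
    for_each_online_node ( node )
        if ( cpumask_intersects(&node_to_cpumask(node), &cpumask) )
            node_set(node, nodemask);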


 xen/common/cpupool.c  |    9 +++++++++
 xen/common/domain.c   |    9 ++++++++-
 xen/common/schedule.c |   10 +++-------
 3 files changed, 20 insertions(+), 8 deletions(-)



[-- Attachment #2: xen-staging.hg.patch --]
[-- Type: text/x-patch, Size: 3856 bytes --]

# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1327311901 -3600
# Node ID cee17928d4eff5e7873f30276f424e16dca878ad
# Parent  eca719b621a1201528bfec25fb1786ec21c0c9d3
Reflect cpupool in numa node affinity

In order to prefer node-local memory for a domain, the NUMA node locality
info must be built according to the CPUs belonging to the domain's cpupool.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>

diff -r eca719b621a1 -r cee17928d4ef xen/common/cpupool.c
--- a/xen/common/cpupool.c	Sun Jan 22 10:20:03 2012 +0000
+++ b/xen/common/cpupool.c	Mon Jan 23 10:45:01 2012 +0100
@@ -220,6 +220,7 @@ static int cpupool_assign_cpu_locked(str
 {
     int ret;
     struct cpupool *old;
+    struct domain *d;
 
     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
@@ -240,6 +241,14 @@ static int cpupool_assign_cpu_locked(str
         cpupool_cpu_moving = NULL;
     }
     cpumask_set_cpu(cpu, c->cpu_valid);
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+    {
+        domain_update_node_affinity(d);
+    }
+    rcu_read_unlock(&domlist_read_lock);
+
     return 0;
 }
 
diff -r eca719b621a1 -r cee17928d4ef xen/common/domain.c
--- a/xen/common/domain.c	Sun Jan 22 10:20:03 2012 +0000
+++ b/xen/common/domain.c	Mon Jan 23 10:45:01 2012 +0100
@@ -11,6 +11,7 @@
 #include <xen/ctype.h>
 #include <xen/errno.h>
 #include <xen/sched.h>
+#include <xen/sched-if.h>
 #include <xen/domain.h>
 #include <xen/mm.h>
 #include <xen/event.h>
@@ -365,15 +366,21 @@ void domain_update_node_affinity(struct 
 void domain_update_node_affinity(struct domain *d)
 {
     cpumask_t cpumask;
+    cpumask_t online_affinity;
+    cpumask_t *online;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
+    online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;
     cpumask_clear(&cpumask);
     spin_lock(&d->node_affinity_lock);
 
     for_each_vcpu ( d, v )
-        cpumask_or(&cpumask, &cpumask, v->cpu_affinity);
+    {
+        cpumask_and(&online_affinity, v->cpu_affinity, online);
+        cpumask_or(&cpumask, &cpumask, &online_affinity);
+    }
 
     for_each_online_node ( node )
         if ( cpumask_intersects(&node_to_cpumask(node), &cpumask) )
diff -r eca719b621a1 -r cee17928d4ef xen/common/schedule.c
--- a/xen/common/schedule.c	Sun Jan 22 10:20:03 2012 +0000
+++ b/xen/common/schedule.c	Mon Jan 23 10:45:01 2012 +0100
@@ -282,11 +282,12 @@ int sched_move_domain(struct domain *d, 
 
         SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }
-    domain_update_node_affinity(d);
 
     d->cpupool = c;
     SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
     d->sched_priv = domdata;
+
+    domain_update_node_affinity(d);
 
     domain_unpause(d);
 
@@ -537,7 +538,6 @@ int cpu_disable_scheduler(unsigned int c
     struct cpupool *c;
     cpumask_t online_affinity;
     int    ret = 0;
-    bool_t affinity_broken;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
@@ -545,8 +545,6 @@ int cpu_disable_scheduler(unsigned int c
 
     for_each_domain_in_cpupool ( d, c )
     {
-        affinity_broken = 0;
-
         for_each_vcpu ( d, v )
         {
             vcpu_schedule_lock_irq(v);
@@ -558,7 +556,6 @@ int cpu_disable_scheduler(unsigned int c
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                         v->domain->domain_id, v->vcpu_id);
                 cpumask_setall(v->cpu_affinity);
-                affinity_broken = 1;
             }
 
             if ( v->processor == cpu )
@@ -582,8 +579,7 @@ int cpu_disable_scheduler(unsigned int c
                 ret = -EAGAIN;
         }
 
-        if ( affinity_broken )
-            domain_update_node_affinity(d);
+        domain_update_node_affinity(d);
     }
 
     return ret;
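
Note on the sched_move_domain() hunk above: the domain_update_node_affinity()
call is moved to after the d->cpupool assignment because, with this patch, the
function dereferences d->cpupool to find the pool's valid cpus. A minimal
sketch of the resulting ordering constraint (identifiers as in the patch):

    d->cpupool = c;                  /* switch the domain to the new pool... */
    SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
    d->sched_priv = domdata;
    domain_update_node_affinity(d);  /* ...so this now sees c->cpu_valid
                                        rather than the old pool's cpus */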


Thread overview: 5 messages
2012-01-23  9:51 [PATCH] Reflect cpupool in numa node affinity Juergen Gross
2012-01-23 10:27 ` Jan Beulich
2012-01-23 10:45   ` Juergen Gross
2012-01-23 11:03 ` Ian Campbell
2012-01-23 11:31   ` Juergen Gross
