xen-devel.lists.xenproject.org archive mirror
* [PATCH] x86/S3: Restore broken vcpu affinity on resume
@ 2013-03-26 17:20 Ben Guthro
  2013-03-26 17:23 ` Ben Guthro
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Ben Guthro @ 2013-03-26 17:20 UTC (permalink / raw)
  To: xen-devel; +Cc: Ben Guthro

When in SYS_STATE_suspend and going through the cpu_disable_scheduler()
path, save a copy of each vcpu's current cpu affinity and set a flag so
that it can be restored later.

Later, in the resume process, when enabling the nonboot cpus, restore
these affinities.

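In outline, the flow is as follows (a simplified sketch assembled from
the hunks below, not a compilable unit on its own):

    /* Suspend side, in cpu_disable_scheduler(): */
    if ( system_state == SYS_STATE_suspend )
    {
        cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity); /* save */
        v->affinity_broken = 1;                               /* mark */
    }
    cpumask_setall(v->cpu_affinity);                          /* break */

    /* Resume side, called from enable_nonboot_cpus(): */
    if ( v->affinity_broken )
    {
        cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved); /* restore */
        v->affinity_broken = 0;
    }
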
This is the second submission of this patch.
The primary difference from the first submission is the correction of
formatting problems. While making those corrections, I also tested
another change in the cpu_disable_scheduler() path that is appropriate
here.

Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
---
 xen/common/cpu.c           |    3 +++
 xen/common/cpupool.c       |    5 +----
 xen/common/domain.c        |    2 ++
 xen/common/schedule.c      |   49 +++++++++++++++++++++++++++++++++++++++++++-
 xen/include/xen/sched-if.h |    5 +++++
 xen/include/xen/sched.h    |    6 ++++++
 6 files changed, 65 insertions(+), 5 deletions(-)

diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e..3c77406 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -215,4 +215,7 @@ void enable_nonboot_cpus(void)
     }
 
     cpumask_clear(&frozen_cpus);
+
+    if ( system_state == SYS_STATE_resume )
+        restore_vcpu_affinity();
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 10b10f8..7a04f5e 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -19,13 +19,10 @@
 #include <xen/sched-if.h>
 #include <xen/cpu.h>
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..590548e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
         free_cpumask_var(v->cpu_affinity);
         free_cpumask_var(v->cpu_affinity_tmp);
+        free_cpumask_var(v->cpu_affinity_saved);
         free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 83fae4c..3e4d4ad 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -541,6 +541,46 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity(void)
+{
+    struct domain *d;
+    struct vcpu *v;
+    struct cpupool **c;
+
+    for_each_cpupool(c)
+    {
+        for_each_domain_in_cpupool ( d, *c )
+        {
+            for_each_vcpu ( d, v )
+            {
+                vcpu_schedule_lock_irq(v);
+
+                if ( v->affinity_broken )
+                {
+                    printk("Restoring vcpu affinity for domain %d vcpu %d\n",
+                            v->domain->domain_id, v->vcpu_id);
+                    cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+                    v->affinity_broken = 0;
+                }
+
+                if ( v->processor == smp_processor_id() )
+                {
+                    set_bit(_VPF_migrating, &v->pause_flags);
+                    vcpu_schedule_unlock_irq(v);
+                    vcpu_sleep_nosync(v);
+                    vcpu_migrate(v);
+                }
+                else
+                {
+                    vcpu_schedule_unlock_irq(v);
+                }
+            }
+
+            domain_update_node_affinity(d);
+        }
+    }
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -554,7 +594,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     int    ret = 0;
 
     c = per_cpu(cpupool, cpu);
-    if ( (c == NULL) || (system_state == SYS_STATE_suspend) )
+    if ( c == NULL )
         return ret;
 
     for_each_domain_in_cpupool ( d, c )
@@ -569,6 +609,13 @@ int cpu_disable_scheduler(unsigned int cpu)
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                         v->domain->domain_id, v->vcpu_id);
+
+                if ( system_state == SYS_STATE_suspend )
+                {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22c..547e71e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -13,6 +13,9 @@
 /* A global pointer to the initial cpupool (POOL0). */
 extern struct cpupool *cpupool0;
 
+/* Linked list of cpu pools, sorted by poolid. */
+extern struct cpupool *cpupool_list;
+
 /* cpus currently in no cpupool */
 extern cpumask_t cpupool_free_cpus;
 
@@ -211,5 +214,7 @@ struct cpupool
     (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+#define for_each_cpupool(ptr)    \
+    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..d24fc6b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU needs affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -175,6 +178,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -697,6 +702,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(void);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
-- 
1.7.9.5

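The for_each_cpupool() iterator exported above hands the loop body a
pointer-to-pointer into the pool list (the usual idiom so entries can
also be unlinked in place); a minimal usage sketch in the style of
restore_vcpu_affinity(), where process_domain() is a hypothetical
stand-in for per-domain work:

    struct cpupool **c;
    struct domain *d;

    for_each_cpupool(c)                      /* c -> each list link in turn */
    {
        for_each_domain_in_cpupool ( d, *c ) /* *c is the pool itself */
            process_domain(d);               /* hypothetical helper */
    }
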
* [PATCH] x86/S3: Restore broken vcpu affinity on resume
@ 2013-03-26 16:12 Ben Guthro
  2013-03-26 16:54 ` Jan Beulich
  0 siblings, 1 reply; 11+ messages in thread
From: Ben Guthro @ 2013-03-26 16:12 UTC (permalink / raw)
  To: xen-devel; +Cc: Ben Guthro

When in SYS_STATE_suspend and going through the cpu_disable_scheduler()
path, save a copy of each vcpu's current cpu affinity and set a flag so
that it can be restored later.

Later, in the resume process, when enabling the nonboot cpus, restore
these affinities.

Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
---
 xen/common/cpu.c           |    3 +++
 xen/common/cpupool.c       |    5 +----
 xen/common/domain.c        |    2 ++
 xen/common/schedule.c      |   46 ++++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/sched-if.h |    5 +++++
 xen/include/xen/sched.h    |    6 ++++++
 6 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e..786966a 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -215,4 +215,7 @@ void enable_nonboot_cpus(void)
     }
 
     cpumask_clear(&frozen_cpus);
+
+    if (system_state == SYS_STATE_resume)
+	restore_vcpu_affinity();
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 10b10f8..7a04f5e 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -19,13 +19,10 @@
 #include <xen/sched-if.h>
 #include <xen/cpu.h>
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..590548e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
         free_cpumask_var(v->cpu_affinity);
         free_cpumask_var(v->cpu_affinity_tmp);
+        free_cpumask_var(v->cpu_affinity_saved);
         free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 83fae4c..59a1def 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -541,6 +541,46 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity(void)
+{
+    struct domain *d;
+    struct vcpu *v;
+    struct cpupool **c;
+
+    for_each_cpupool(c)
+    {
+        for_each_domain_in_cpupool ( d, *c )
+        {
+            for_each_vcpu ( d, v )
+            {
+                vcpu_schedule_lock_irq(v);
+
+                if (v->affinity_broken)
+	        {
+	            printk("Restoring vcpu affinity for domain %d vcpu %d\n",
+                            v->domain->domain_id, v->vcpu_id);
+		    cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+		    v->affinity_broken = 0;
+	        }
+
+	        if ( v->processor == smp_processor_id() )
+                {
+                    set_bit(_VPF_migrating, &v->pause_flags);
+                    vcpu_schedule_unlock_irq(v);
+                    vcpu_sleep_nosync(v);
+                    vcpu_migrate(v);
+                }
+                else
+                {
+                    vcpu_schedule_unlock_irq(v);
+                }
+            }
+
+            domain_update_node_affinity(d);
+        }
+    }
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -569,6 +609,12 @@ int cpu_disable_scheduler(unsigned int cpu)
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                         v->domain->domain_id, v->vcpu_id);
+
+		if (system_state == SYS_STATE_suspend) {
+			cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+			v->affinity_broken = 1;
+		}
+
                 cpumask_setall(v->cpu_affinity);
             }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22c..547e71e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -13,6 +13,9 @@
 /* A global pointer to the initial cpupool (POOL0). */
 extern struct cpupool *cpupool0;
 
+/* Linked list of cpu pools, sorted by poolid. */
+extern struct cpupool *cpupool_list;
+
 /* cpus currently in no cpupool */
 extern cpumask_t cpupool_free_cpus;
 
@@ -211,5 +214,7 @@ struct cpupool
     (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+#define for_each_cpupool(ptr)    \
+    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..d24fc6b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU needs affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -175,6 +178,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -697,6 +702,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(void);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
-- 
1.7.9.5


Thread overview: 11+ messages (newest: 2013-03-27 12:06 UTC)
2013-03-26 17:20 [PATCH] x86/S3: Restore broken vcpu affinity on resume Ben Guthro
2013-03-26 17:23 ` Ben Guthro
2013-03-27  6:06 ` Juergen Gross
2013-03-27  9:19 ` Jan Beulich
2013-03-27 12:01 ` George Dunlap
2013-03-27 12:04   ` Ben Guthro
2013-03-27 12:06     ` George Dunlap
  -- strict thread matches above, loose matches on Subject: below --
2013-03-26 16:12 Ben Guthro
2013-03-26 16:54 ` Jan Beulich
2013-03-26 16:58   ` Ben Guthro
2013-03-26 17:04     ` Jan Beulich

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).