From: Ben Guthro <benjamin.guthro@citrix.com>
To: xen-devel@lists.xen.org
Cc: Ben Guthro <benjamin.guthro@citrix.com>
Subject: [PATCH] x86/S3: Restore broken vcpu affinity on resume
Date: Tue, 26 Mar 2013 12:12:32 -0400
Message-ID: <1364314352-20280-1-git-send-email-benjamin.guthro@citrix.com>

When in SYS_STATE_suspend and going through the cpu_disable_scheduler
path, save a copy of each vcpu's current cpu affinity and set a flag
marking it for later restoration.

Later, during resume, once the non-boot cpus have been re-enabled,
restore these affinities. In outline, the two halves mirror each other,
as the condensed excerpt below shows.
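
The following is condensed from the hunks below (locking and the
surrounding cpupool/domain/vcpu loops omitted):

    /* Suspend path, in cpu_disable_scheduler(): save the affinity
     * about to be broken and flag the vcpu for restoration. */
    if ( system_state == SYS_STATE_suspend )
    {
        cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
        v->affinity_broken = 1;
    }
    cpumask_setall(v->cpu_affinity);

    /* Resume path, in restore_vcpu_affinity(): put the saved mask back. */
    if ( v->affinity_broken )
    {
        cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
        v->affinity_broken = 0;
    }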

Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
---
 xen/common/cpu.c           |    3 +++
 xen/common/cpupool.c       |    5 +----
 xen/common/domain.c        |    2 ++
 xen/common/schedule.c      |   46 ++++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/sched-if.h |    5 +++++
 xen/include/xen/sched.h    |    6 ++++++
 6 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e..786966a 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -215,4 +215,7 @@ void enable_nonboot_cpus(void)
     }
 
     cpumask_clear(&frozen_cpus);
+
+    if ( system_state == SYS_STATE_resume )
+        restore_vcpu_affinity();
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 10b10f8..7a04f5e 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -19,13 +19,10 @@
 #include <xen/sched-if.h>
 #include <xen/cpu.h>
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..590548e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
         free_cpumask_var(v->cpu_affinity);
         free_cpumask_var(v->cpu_affinity_tmp);
+        free_cpumask_var(v->cpu_affinity_saved);
         free_cpumask_var(v->vcpu_dirty_cpumask);
         free_vcpu_struct(v);
         return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 83fae4c..59a1def 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -541,6 +541,46 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity(void)
+{
+    struct domain *d;
+    struct vcpu *v;
+    struct cpupool **c;
+
+    for_each_cpupool(c)
+    {
+        for_each_domain_in_cpupool ( d, *c )
+        {
+            for_each_vcpu ( d, v )
+            {
+                vcpu_schedule_lock_irq(v);
+
+                if ( v->affinity_broken )
+                {
+                    printk("Restoring vcpu affinity for domain %d vcpu %d\n",
+                           v->domain->domain_id, v->vcpu_id);
+                    cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+                    v->affinity_broken = 0;
+                }
+
+                if ( v->processor == smp_processor_id() )
+                {
+                    set_bit(_VPF_migrating, &v->pause_flags);
+                    vcpu_schedule_unlock_irq(v);
+                    vcpu_sleep_nosync(v);
+                    vcpu_migrate(v);
+                }
+                else
+                {
+                    vcpu_schedule_unlock_irq(v);
+                }
+            }
+
+            domain_update_node_affinity(d);
+        }
+    }
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -569,6 +609,12 @@ int cpu_disable_scheduler(unsigned int cpu)
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                         v->domain->domain_id, v->vcpu_id);
+
+                if ( system_state == SYS_STATE_suspend ) {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22c..547e71e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -13,6 +13,9 @@
 /* A global pointer to the initial cpupool (POOL0). */
 extern struct cpupool *cpupool0;
 
+/* Linked list of cpu pools, sorted by poolid. */
+extern struct cpupool *cpupool_list;
+
 /* cpus currently in no cpupool */
 extern cpumask_t cpupool_free_cpus;
 
@@ -211,5 +214,7 @@ struct cpupool
     (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+#define for_each_cpupool(ptr)    \
+    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..d24fc6b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU needs affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -175,6 +178,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -697,6 +702,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(void);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
-- 
1.7.9.5
