* [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Ben Guthro @ 2013-03-26 16:12 UTC
To: xen-devel; +Cc: Ben Guthro
When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
path, save a copy of the current cpu affinity, and set a flag to
restore it later.

Later, in the resume process, when enabling nonboot cpus, restore these
affinities.
Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
---
 xen/common/cpu.c           |    3 +++
 xen/common/cpupool.c       |    5 +----
 xen/common/domain.c        |    2 ++
 xen/common/schedule.c      |   46 ++++++++++++++++++++++++++++++++++++++++++++
 xen/include/xen/sched-if.h |    5 +++++
 xen/include/xen/sched.h    |    6 ++++++
 6 files changed, 63 insertions(+), 4 deletions(-)
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e..786966a 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -215,4 +215,7 @@ void enable_nonboot_cpus(void)
     }
 
     cpumask_clear(&frozen_cpus);
+
+    if (system_state == SYS_STATE_resume)
+        restore_vcpu_affinity();
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 10b10f8..7a04f5e 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -19,13 +19,10 @@
 #include <xen/sched-if.h>
 #include <xen/cpu.h>
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+struct cpupool *cpupool_list;            /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..590548e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
     free_cpumask_var(v->cpu_affinity);
     free_cpumask_var(v->cpu_affinity_tmp);
+    free_cpumask_var(v->cpu_affinity_saved);
     free_cpumask_var(v->vcpu_dirty_cpumask);
     free_vcpu_struct(v);
     return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 83fae4c..59a1def 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -541,6 +541,46 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity()
+{
+    struct domain *d;
+    struct vcpu *v;
+    struct cpupool **c;
+
+    for_each_cpupool(c)
+    {
+        for_each_domain_in_cpupool ( d, *c )
+        {
+            for_each_vcpu ( d, v )
+            {
+                vcpu_schedule_lock_irq(v);
+
+                if (v->affinity_broken)
+                {
+                    printk("Restoring vcpu affinity for domain %d vcpu %d\n",
+                           v->domain->domain_id, v->vcpu_id);
+                    cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+                    v->affinity_broken = 0;
+                }
+
+                if ( v->processor == smp_processor_id() )
+                {
+                    set_bit(_VPF_migrating, &v->pause_flags);
+                    vcpu_schedule_unlock_irq(v);
+                    vcpu_sleep_nosync(v);
+                    vcpu_migrate(v);
+                }
+                else
+                {
+                    vcpu_schedule_unlock_irq(v);
+                }
+            }
+
+            domain_update_node_affinity(d);
+        }
+    }
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -569,6 +609,12 @@ int cpu_disable_scheduler(unsigned int cpu)
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
+
+                if (system_state == SYS_STATE_suspend) {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22c..547e71e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -13,6 +13,9 @@
 /* A global pointer to the initial cpupool (POOL0). */
 extern struct cpupool *cpupool0;
 
+/* linked list of cpu pools */
+extern struct cpupool *cpupool_list;
+
 /* cpus currently in no cpupool */
 extern cpumask_t cpupool_free_cpus;
 
@@ -211,5 +214,7 @@ struct cpupool
     (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+#define for_each_cpupool(ptr)    \
+    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..d24fc6b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU need affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -175,6 +178,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -697,6 +702,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int  cpu_disable_scheduler(unsigned int cpu);
 int  vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(void);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
--
1.7.9.5
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Jan Beulich @ 2013-03-26 16:54 UTC
To: Ben Guthro; +Cc: xen-devel
>>> On 26.03.13 at 17:12, Ben Guthro <benjamin.guthro@citrix.com> wrote:
> When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
> path, save a copy of the current cpu affinity, and set a flag to
> restore it later.
>
> Later, in the resume process, when enabling nonboot cpus, restore these
> affinities.
>
> Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
Looks reasonable to me, minus severe formatting issues (hard
tabs being the most obvious, but not the only problem).
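To illustrate what CODING_STYLE at the top of the tree asks for -- four-space
indentation rather than hard tabs, and blanks immediately inside the
parentheses of a condition -- the new checks would want to look roughly like
this (style sketch only, not a functional change):

    /* Style sketch only -- same code as in the patch, reformatted. */
    if ( system_state == SYS_STATE_suspend )
    {
        cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
        v->affinity_broken = 1;
    }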
> --- a/xen/include/xen/sched-if.h
> +++ b/xen/include/xen/sched-if.h
> @@ -13,6 +13,9 @@
>  /* A global pointer to the initial cpupool (POOL0). */
>  extern struct cpupool *cpupool0;
>
> +/* linked list of cpu pools */
> +extern struct cpupool *cpupool_list;
> +
>  /* cpus currently in no cpupool */
>  extern cpumask_t cpupool_free_cpus;
>
> @@ -211,5 +214,7 @@ struct cpupool
>      (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
>  #define cpupool_online_cpumask(_pool) \
>      (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
> +#define for_each_cpupool(ptr)    \
> +    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
>
>  #endif /* __XEN_SCHED_IF_H__ */
I don't particularly like the increase of scope for these two, but
I guess there aren't many alternatives.
Jan
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Ben Guthro @ 2013-03-26 16:58 UTC
To: Jan Beulich; +Cc: xen-devel
On 03/26/2013 12:54 PM, Jan Beulich wrote:
>
> Looks reasonable to me, minus severe formatting issues (hard
> tabs being the most obvious, but not the only problem).
Thanks for the review - apologies for the formatting.

Would you prefer I clean up the formatting and re-submit, or would you
prefer to take care of it prior to commit?
>
> I don't particularly like the increase of scope for these two, but
> I guess there aren't many alternatives.
Indeed.

My first pass at this only operated on cpupool0, which did not require
this scope increase... but it was pointed out that this was probably not
general enough to be acceptable upstream.
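(For reference, a reconstructed sketch of what that narrower approach
implies -- hypothetical, not the actual earlier code. Iterating only the
initial pool needs just the already-exported cpupool0 symbol:

    /* Hypothetical sketch of the cpupool0-only variant. */
    for_each_domain_in_cpupool ( d, cpupool0 )
        for_each_vcpu ( d, v )
            /* ... restore affinity as in the patch ... */;

whereas walking every pool requires exporting cpupool_list and
for_each_cpupool(), as the patch now does.)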
Thanks
Ben
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Jan Beulich @ 2013-03-26 17:04 UTC
To: Ben Guthro; +Cc: xen-devel
>>> On 26.03.13 at 17:58, Ben Guthro <Benjamin.Guthro@citrix.com> wrote:
> On 03/26/2013 12:54 PM, Jan Beulich wrote:
>>
>> Looks reasonable to me, minus severe formatting issues (hard
>> tabs being the most obvious, but not the only problem).
>
> Thanks for the review - apologies for the formatting.
> Would you prefer I clean up the formatting and re-submit, or would you
> prefer to take care of it prior to commit?
Considering that this isn't just one or two places, I'd prefer you
to re-submit.
Jan
* [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Ben Guthro @ 2013-03-26 17:20 UTC
To: xen-devel; +Cc: Ben Guthro
When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
path, save a copy of the current cpu affinity, and set a flag to
restore it later.

Later, in the resume process, when enabling nonboot cpus, restore these
affinities.

This is the second submission of this patch. The primary difference
from the first is the fixing of formatting problems. However, while
doing so, I also tested another change in the cpu_disable_scheduler()
path that is appropriate here, and have included it.
Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
---
 xen/common/cpu.c           |    3 +++
 xen/common/cpupool.c       |    5 +----
 xen/common/domain.c        |    2 ++
 xen/common/schedule.c      |   49 +++++++++++++++++++++++++++++++++++++++++++-
 xen/include/xen/sched-if.h |    5 +++++
 xen/include/xen/sched.h    |    6 ++++++
 6 files changed, 65 insertions(+), 5 deletions(-)
diff --git a/xen/common/cpu.c b/xen/common/cpu.c
index 630881e..3c77406 100644
--- a/xen/common/cpu.c
+++ b/xen/common/cpu.c
@@ -215,4 +215,7 @@ void enable_nonboot_cpus(void)
     }
 
     cpumask_clear(&frozen_cpus);
+
+    if (system_state == SYS_STATE_resume)
+        restore_vcpu_affinity();
 }
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 10b10f8..7a04f5e 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -19,13 +19,10 @@
 #include <xen/sched-if.h>
 #include <xen/cpu.h>
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+struct cpupool *cpupool_list;            /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..590548e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
 
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
 
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
     free_cpumask_var(v->cpu_affinity);
     free_cpumask_var(v->cpu_affinity_tmp);
+    free_cpumask_var(v->cpu_affinity_saved);
     free_cpumask_var(v->vcpu_dirty_cpumask);
     free_vcpu_struct(v);
     return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 83fae4c..3e4d4ad 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -541,6 +541,46 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity()
+{
+    struct domain *d;
+    struct vcpu *v;
+    struct cpupool **c;
+
+    for_each_cpupool(c)
+    {
+        for_each_domain_in_cpupool ( d, *c )
+        {
+            for_each_vcpu ( d, v )
+            {
+                vcpu_schedule_lock_irq(v);
+
+                if (v->affinity_broken)
+                {
+                    printk("Restoring vcpu affinity for domain %d vcpu %d\n",
+                           v->domain->domain_id, v->vcpu_id);
+                    cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+                    v->affinity_broken = 0;
+                }
+
+                if ( v->processor == smp_processor_id() )
+                {
+                    set_bit(_VPF_migrating, &v->pause_flags);
+                    vcpu_schedule_unlock_irq(v);
+                    vcpu_sleep_nosync(v);
+                    vcpu_migrate(v);
+                }
+                else
+                {
+                    vcpu_schedule_unlock_irq(v);
+                }
+            }
+
+            domain_update_node_affinity(d);
+        }
+    }
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -554,7 +594,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     int ret = 0;
 
     c = per_cpu(cpupool, cpu);
-    if ( (c == NULL) || (system_state == SYS_STATE_suspend) )
+    if ( c == NULL )
         return ret;
 
     for_each_domain_in_cpupool ( d, c )
@@ -569,6 +609,13 @@ int cpu_disable_scheduler(unsigned int cpu)
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
+
+                if (system_state == SYS_STATE_suspend)
+                {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 9ace22c..547e71e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -13,6 +13,9 @@
 /* A global pointer to the initial cpupool (POOL0). */
 extern struct cpupool *cpupool0;
 
+/* linked list of cpu pools */
+extern struct cpupool *cpupool_list;
+
 /* cpus currently in no cpupool */
 extern cpumask_t cpupool_free_cpus;
 
@@ -211,5 +214,7 @@ struct cpupool
     (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
 #define cpupool_online_cpumask(_pool) \
     (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
+#define for_each_cpupool(ptr)    \
+    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
 
 #endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..d24fc6b 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,9 @@ struct vcpu
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
+    /* VCPU need affinity restored */
+    bool_t           affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -175,6 +178,8 @@ struct vcpu
     cpumask_var_t    cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t    cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t    cpu_affinity_saved;
 
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t    vcpu_dirty_cpumask;
@@ -697,6 +702,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int  cpu_disable_scheduler(unsigned int cpu);
 int  vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(void);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
--
1.7.9.5
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Ben Guthro @ 2013-03-26 17:23 UTC
To: Ben Guthro; +Cc: xen-devel@lists.xen.org
On 03/26/2013 01:20 PM, Ben Guthro wrote:
> +                if (system_state == SYS_STATE_suspend)
> +                {

Drat. Looks like I missed a tab here.

> +                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
> +                    v->affinity_broken = 1;
> +                }
> +
>                  cpumask_setall(v->cpu_affinity);
>              }
>
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Juergen Gross @ 2013-03-27 6:06 UTC
To: Ben Guthro; +Cc: xen-devel
On 26.03.2013 18:20, Ben Guthro wrote:
> When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
> path, save a copy of the current cpu affinity, and set a flag to
> restore it later.
>
> Later, in the resume process, when enabling nonboot cpus, restore these
> affinities.
>
> This is the second submission of this patch. The primary difference
> from the first is the fixing of formatting problems. However, while
> doing so, I also tested another change in the cpu_disable_scheduler()
> path that is appropriate here, and have included it.
>
> Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
Acked-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
--
Juergen Gross Principal Developer Operating Systems
PBG PDG ES&S SWE OS6 Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions e-mail: juergen.gross@ts.fujitsu.com
Domagkstr. 28 Internet: ts.fujitsu.com
D-80807 Muenchen Company details: ts.fujitsu.com/imprint.html
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Jan Beulich @ 2013-03-27 9:19 UTC
To: Ben Guthro; +Cc: xen-devel
>>> On 26.03.13 at 18:20, Ben Guthro <benjamin.guthro@citrix.com> wrote:
> +void restore_vcpu_affinity()
> +{
> +    struct domain *d;
> +    struct vcpu *v;
> +    struct cpupool **c;
> +
> +    for_each_cpupool(c)
> +    {
> +        for_each_domain_in_cpupool ( d, *c )
On a second look, I wonder why the two loops above can't be
replaced by for_each_domain(), avoiding the need to widen the
scopes of the two CPU pool specific items.
Furthermore the function could then take a domain pointer
(rather than itself looping over all domains), and be called from
thaw_domains() right before the call to domain_unpause().
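Roughly, and only as an untested sketch of that suggestion (not what was
submitted), the function might then shrink to:

    /* Untested sketch: per-domain variant, called once per domain. */
    void restore_vcpu_affinity(struct domain *d)
    {
        struct vcpu *v;

        for_each_vcpu ( d, v )
        {
            vcpu_schedule_lock_irq(v);

            if ( v->affinity_broken )
            {
                cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
                v->affinity_broken = 0;
            }

            if ( v->processor == smp_processor_id() )
            {
                set_bit(_VPF_migrating, &v->pause_flags);
                vcpu_schedule_unlock_irq(v);
                vcpu_sleep_nosync(v);
                vcpu_migrate(v);
            }
            else
                vcpu_schedule_unlock_irq(v);
        }

        domain_update_node_affinity(d);
    }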
Jan
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: George Dunlap @ 2013-03-27 12:01 UTC
To: Ben Guthro; +Cc: xen-devel@lists.xen.org
On Tue, Mar 26, 2013 at 5:20 PM, Ben Guthro <benjamin.guthro@citrix.com> wrote:
> When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
> path, save a copy of the current cpu affinity, and set a flag to
> restore it later.
>
> Later, in the resume process, when enabling nonboot cpus, restore these
> affinities.
>
> This is the second submission of this patch. The primary difference
> from the first is the fixing of formatting problems. However, while
> doing so, I also tested another change in the cpu_disable_scheduler()
> path that is appropriate here, and have included it.
>
> Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
Overall looks fine to me; just a few comments below.
> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
> index 10b10f8..7a04f5e 100644
> --- a/xen/common/cpupool.c
> +++ b/xen/common/cpupool.c
> @@ -19,13 +19,10 @@
>  #include <xen/sched-if.h>
>  #include <xen/cpu.h>
>
> -#define for_each_cpupool(ptr)    \
> -    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
> -
You're taking this out because it's not used, I presume?
Since you'll probably be sending another patch anyway (see below), I
think it would be better if you pull this out into a specific
"clean-up" patch.
> @@ -569,6 +609,13 @@ int cpu_disable_scheduler(unsigned int cpu)
>              {
>                  printk("Breaking vcpu affinity for domain %d vcpu %d\n",
>                         v->domain->domain_id, v->vcpu_id);
> +
> +                if (system_state == SYS_STATE_suspend)
> +                {
This appears to have two tabs instead of 16 spaces?
-George
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: Ben Guthro @ 2013-03-27 12:04 UTC
To: George Dunlap; +Cc: xen-devel@lists.xen.org
On 03/27/2013 08:01 AM, George Dunlap wrote:
> On Tue, Mar 26, 2013 at 5:20 PM, Ben Guthro <benjamin.guthro@citrix.com> wrote:
>> When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
>> path, save a copy of the current cpu affinity, and set a flag to
>> restore it later.
>>
>> Later, in the resume process, when enabling nonboot cpus, restore these
>> affinities.
>>
>> This is the second submission of this patch. The primary difference
>> from the first is the fixing of formatting problems. However, while
>> doing so, I also tested another change in the cpu_disable_scheduler()
>> path that is appropriate here, and have included it.
>>
>> Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
>
> Overall looks fine to me; just a few comments below.
>
>> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
>> index 10b10f8..7a04f5e 100644
>> --- a/xen/common/cpupool.c
>> +++ b/xen/common/cpupool.c
>> @@ -19,13 +19,10 @@
>>  #include <xen/sched-if.h>
>>  #include <xen/cpu.h>
>>
>> -#define for_each_cpupool(ptr)    \
>> -    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
>> -
>
> You're taking this out because it's not used, I presume?
>
> Since you'll probably be sending another patch anyway (see below), I
> think it would be better if you pull this out into a specific
> "clean-up" patch.
No. This was moved to a header file to allow use elsewhere.

I'm in the process of looking into Jan's suggestion of eliminating the
need for it by moving some code into thaw_domains().
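Concretely -- as an untested sketch, assuming restore_vcpu_affinity() is
reworked to take a domain pointer as Jan suggests -- thaw_domains() in
xen/arch/x86/acpi/power.c would become something like:

    /* Untested sketch only; not part of the submitted patch. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
    {
        restore_vcpu_affinity(d);
        domain_unpause(d);
    }
    rcu_read_unlock(&domlist_read_lock);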
>
>
>> @@ -569,6 +609,13 @@ int cpu_disable_scheduler(unsigned int cpu)
>>              {
>>                  printk("Breaking vcpu affinity for domain %d vcpu %d\n",
>>                         v->domain->domain_id, v->vcpu_id);
>> +
>> +                if (system_state == SYS_STATE_suspend)
>> +                {
>
> This appears to have two tabs instead of 16 spaces?
Yes, I'll fix this in v3.
Thanks for your review
Ben
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume
From: George Dunlap @ 2013-03-27 12:06 UTC
To: Ben Guthro; +Cc: xen-devel@lists.xen.org
On 27/03/13 12:04, Ben Guthro wrote:
>
> On 03/27/2013 08:01 AM, George Dunlap wrote:
>> On Tue, Mar 26, 2013 at 5:20 PM, Ben Guthro <benjamin.guthro@citrix.com> wrote:
>>> When in SYS_STATE_suspend, and going through the cpu_disable_scheduler
>>> path, save a copy of the current cpu affinity, and set a flag to
>>> restore it later.
>>>
>>> Later, in the resume process, when enabling nonboot cpus, restore these
>>> affinities.
>>>
>>> This is the second submission of this patch. The primary difference
>>> from the first is the fixing of formatting problems. However, while
>>> doing so, I also tested another change in the cpu_disable_scheduler()
>>> path that is appropriate here, and have included it.
>>>
>>> Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
>> Overall looks fine to me; just a few comments below.
>>
>>> diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
>>> index 10b10f8..7a04f5e 100644
>>> --- a/xen/common/cpupool.c
>>> +++ b/xen/common/cpupool.c
>>> @@ -19,13 +19,10 @@
>>>  #include <xen/sched-if.h>
>>>  #include <xen/cpu.h>
>>>
>>> -#define for_each_cpupool(ptr)    \
>>> -    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
>>> -
>> You're taking this out because it's not used, I presume?
>>
>> Since you'll probably be sending another patch anyway (see below), I
>> think it would be better if you pull this out into a specific
>> "clean-up" patch.
> No. This was moved to a header file to allow use elsewhere.
> I'm in the process of looking into Jan's suggestion of eliminating the
> need for it by moving some code into thaw_domains()
Oh, right -- sorry, I missed it way down at the bottom of sched-if.h. N/m.
-George