* [PATCH] x86/S3: Restore broken vcpu affinity on resume (v3)
@ 2013-03-27 12:36 Ben Guthro
2013-03-27 12:50 ` Jan Beulich
From: Ben Guthro @ 2013-03-27 12:36 UTC (permalink / raw)
To: xen-devel; +Cc: Ben Guthro
When in SYS_STATE_suspend and going through the cpu_disable_scheduler()
path, save a copy of each affected vcpu's current cpu affinity, and set a
flag to restore it later.
Later, in the resume process, when the non-boot CPUs are enabled, restore
these affinities.
v2:
Fix formatting problems.
Remove early return in the cpu_disable_scheduler() path.
v3:
Fix remaining errant tab
Move restore_vcpu_affinity() to thaw_domains(), eliminating the need to
promote for_each_cpupool()
Signed-off-by: Ben Guthro <benjamin.guthro@citrix.com>
---
xen/arch/x86/acpi/power.c | 4 ++++
xen/common/domain.c | 2 ++
xen/common/schedule.c | 41 ++++++++++++++++++++++++++++++++++++++++-
xen/include/xen/sched.h | 6 ++++++
4 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/acpi/power.c b/xen/arch/x86/acpi/power.c
index 3c2585c..74cd371 100644
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -96,7 +96,11 @@ static void thaw_domains(void)
 
     rcu_read_lock(&domlist_read_lock);
     for_each_domain ( d )
+    {
+        if (system_state == SYS_STATE_resume)
+            restore_vcpu_affinity(d);
         domain_unpause(d);
+    }
     rcu_read_unlock(&domlist_read_lock);
 }
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 64ee29d..590548e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -126,6 +126,7 @@ struct vcpu *alloc_vcpu(
     if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
          !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
+         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
          !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
         goto fail_free;
@@ -155,6 +156,7 @@ struct vcpu *alloc_vcpu(
  fail_free:
     free_cpumask_var(v->cpu_affinity);
     free_cpumask_var(v->cpu_affinity_tmp);
+    free_cpumask_var(v->cpu_affinity_saved);
     free_cpumask_var(v->vcpu_dirty_cpumask);
     free_vcpu_struct(v);
     return NULL;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 83fae4c..3861923 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -541,6 +541,38 @@ void vcpu_force_reschedule(struct vcpu *v)
     }
 }
 
+void restore_vcpu_affinity(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+    {
+        vcpu_schedule_lock_irq(v);
+
+        if (v->affinity_broken)
+        {
+            printk("Restoring vcpu affinity for domain %d vcpu %d\n",
+                   v->domain->domain_id, v->vcpu_id);
+            cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
+            v->affinity_broken = 0;
+        }
+
+        if ( v->processor == smp_processor_id() )
+        {
+            set_bit(_VPF_migrating, &v->pause_flags);
+            vcpu_schedule_unlock_irq(v);
+            vcpu_sleep_nosync(v);
+            vcpu_migrate(v);
+        }
+        else
+        {
+            vcpu_schedule_unlock_irq(v);
+        }
+    }
+
+    domain_update_node_affinity(d);
+}
+
 /*
  * This function is used by cpu_hotplug code from stop_machine context
  * and from cpupools to switch schedulers on a cpu.
@@ -554,7 +586,7 @@ int cpu_disable_scheduler(unsigned int cpu)
     int ret = 0;
 
     c = per_cpu(cpupool, cpu);
-    if ( (c == NULL) || (system_state == SYS_STATE_suspend) )
+    if ( c == NULL )
         return ret;
 
     for_each_domain_in_cpupool ( d, c )
@@ -569,6 +601,13 @@ int cpu_disable_scheduler(unsigned int cpu)
             {
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
+
+                if (system_state == SYS_STATE_suspend)
+                {
+                    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
+                    v->affinity_broken = 1;
+                }
+
                 cpumask_setall(v->cpu_affinity);
             }
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index cabaf27..d15d567 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,9 @@ struct vcpu
     bool_t defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t paused_for_shutdown;
+    /* VCPU need affinity restored */
+    bool_t affinity_broken;
+
 
     /*
      * > 0: a single port is being polled;
@@ -175,6 +178,8 @@ struct vcpu
     cpumask_var_t cpu_affinity;
     /* Used to change affinity temporarily. */
     cpumask_var_t cpu_affinity_tmp;
+    /* Used to restore affinity across S3. */
+    cpumask_var_t cpu_affinity_saved;
     /* Bitmask of CPUs which are holding onto this VCPU's state. */
     cpumask_var_t vcpu_dirty_cpumask;
@@ -697,6 +702,7 @@ int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, const cpumask_t *affinity);
+void restore_vcpu_affinity(struct domain *d);
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
--
1.7.9.5
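For orientation, the flow this patch implements spans several files; in rough
outline (a sketch only; the call chain is abbreviated, and the snippets below
repeat code from the patch above rather than adding anything new):

    /* Suspend path -- cpu_disable_scheduler() runs as each non-boot CPU goes
     * offline.  For a vcpu whose affinity mask would otherwise be left with
     * no online CPU, remember the mask before widening it:
     */
    cpumask_copy(v->cpu_affinity_saved, v->cpu_affinity);
    v->affinity_broken = 1;
    cpumask_setall(v->cpu_affinity);

    /* Resume path -- thaw_domains() now calls restore_vcpu_affinity() for
     * each domain before unpausing it; the saved mask is copied back, the
     * flag is cleared, and the vcpu is migrated onto an allowed CPU:
     */
    cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
    v->affinity_broken = 0;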
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume (v3)
2013-03-27 12:36 [PATCH] x86/S3: Restore broken vcpu affinity on resume (v3) Ben Guthro
@ 2013-03-27 12:50 ` Jan Beulich
2013-03-27 12:56 ` Ben Guthro
From: Jan Beulich @ 2013-03-27 12:50 UTC (permalink / raw)
To: Ben Guthro; +Cc: xen-devel
>>> On 27.03.13 at 13:36, Ben Guthro <benjamin.guthro@citrix.com> wrote:
> --- a/xen/arch/x86/acpi/power.c
> +++ b/xen/arch/x86/acpi/power.c
> @@ -96,7 +96,11 @@ static void thaw_domains(void)
>
> rcu_read_lock(&domlist_read_lock);
> for_each_domain ( d )
> + {
> + if (system_state == SYS_STATE_resume)
I don't think there's a way to get here with system_state other
than SYS_STATE_resume.
Also, should there be a need to re-submit again, there are spaces
missing inside the parentheses.
> + restore_vcpu_affinity(d);
> domain_unpause(d);
> + }
> rcu_read_unlock(&domlist_read_lock);
> }
>
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -541,6 +541,38 @@ void vcpu_force_reschedule(struct vcpu *v)
> }
> }
>
> +void restore_vcpu_affinity(struct domain *d)
> +{
> + struct vcpu *v;
> +
> + for_each_vcpu ( d, v )
> + {
> + vcpu_schedule_lock_irq(v);
> +
> + if (v->affinity_broken)
And here again.
> + {
> + printk("Restoring vcpu affinity for domain %d vcpu %d\n",
> + v->domain->domain_id, v->vcpu_id);
XENLOG_DEBUG perhaps? Otherwise this can get pretty noisy
even without loglvl= override during resume if there are many
and/or big domains. To conserve on ring and transmit buffer space,
I'd also suggest shortening the text to "Restoring affinity for
d%dv%d\n" (and using d->domain_id).
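For concreteness, with both suggestions applied the message in
restore_vcpu_affinity() would read roughly as follows (an illustrative sketch
only, assuming the usual XENLOG_* level prefix convention for printk()):

    printk(XENLOG_DEBUG "Restoring affinity for d%dv%d\n",
           d->domain_id, v->vcpu_id);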
Jan
> + cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
> + v->affinity_broken = 0;
> + }
> +
> + if ( v->processor == smp_processor_id() )
> + {
> + set_bit(_VPF_migrating, &v->pause_flags);
> + vcpu_schedule_unlock_irq(v);
> + vcpu_sleep_nosync(v);
> + vcpu_migrate(v);
> + }
> + else
> + {
> + vcpu_schedule_unlock_irq(v);
> + }
> + }
> +
> + domain_update_node_affinity(d);
> +}
> +
> /*
> * This function is used by cpu_hotplug code from stop_machine context
> * and from cpupools to switch schedulers on a cpu.
* Re: [PATCH] x86/S3: Restore broken vcpu affinity on resume (v3)
2013-03-27 12:50 ` Jan Beulich
@ 2013-03-27 12:56 ` Ben Guthro
From: Ben Guthro @ 2013-03-27 12:56 UTC (permalink / raw)
To: Jan Beulich; +Cc: xen-devel
On 03/27/2013 08:50 AM, Jan Beulich wrote:
>>>> On 27.03.13 at 13:36, Ben Guthro <benjamin.guthro@citrix.com> wrote:
>> --- a/xen/arch/x86/acpi/power.c
>> +++ b/xen/arch/x86/acpi/power.c
>> @@ -96,7 +96,11 @@ static void thaw_domains(void)
>>
>> rcu_read_lock(&domlist_read_lock);
>> for_each_domain ( d )
>> + {
>> + if (system_state == SYS_STATE_resume)
>
> I don't think there's a way to get here with system_state other
> than SYS_STATE_resume.
>
> Also, should there be a need to re-submit again, there are spaces
> missing inside the parentheses.
OK, I'll remove this 'if' entirely.
>
>> + restore_vcpu_affinity(d);
>> domain_unpause(d);
>> + }
>> rcu_read_unlock(&domlist_read_lock);
>> }
>>
>> --- a/xen/common/schedule.c
>> +++ b/xen/common/schedule.c
>> @@ -541,6 +541,38 @@ void vcpu_force_reschedule(struct vcpu *v)
>> }
>> }
>>
>> +void restore_vcpu_affinity(struct domain *d)
>> +{
>> + struct vcpu *v;
>> +
>> + for_each_vcpu ( d, v )
>> + {
>> + vcpu_schedule_lock_irq(v);
>> +
>> + if (v->affinity_broken)
>
> And here again.
ACK. Will resolve in v4
>
>> + {
>> + printk("Restoring vcpu affinity for domain %d vcpu %d\n",
>> + v->domain->domain_id, v->vcpu_id);
>
> XENLOG_DEBUG perhaps? Otherwise this can get pretty noisy
> even without loglvl= override during resume if there are many
> and/or big domains. To conserve on ring and transmit buffer space,
> I'd also suggest shortening the text to "Restoring affinity for
> d%dv%d\n" (and using d->domain_id).
>
> Jan
I modeled this after the printk where the affinity was broken, so they
could be matched up in the log, for anyone looking.
Should I also change that printk to XENLOG_DEBUG?
Ben
>
>> + cpumask_copy(v->cpu_affinity, v->cpu_affinity_saved);
>> + v->affinity_broken = 0;
>> + }
>> +
>> + if ( v->processor == smp_processor_id() )
>> + {
>> + set_bit(_VPF_migrating, &v->pause_flags);
>> + vcpu_schedule_unlock_irq(v);
>> + vcpu_sleep_nosync(v);
>> + vcpu_migrate(v);
>> + }
>> + else
>> + {
>> + vcpu_schedule_unlock_irq(v);
>> + }
>> + }
>> +
>> + domain_update_node_affinity(d);
>> +}
>> +
>> /*
>> * This function is used by cpu_hotplug code from stop_machine context
>> * and from cpupools to switch schedulers on a cpu.
>
>