From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xen.org
Cc: Juergen Gross <jgross@suse.com>,
	wei.liu2@citrix.com, stefano.stabellini@eu.citrix.com,
	george.dunlap@eu.citrix.com, andrew.cooper3@citrix.com,
	dario.faggioli@citrix.com, ian.jackson@eu.citrix.com,
	david.vrabel@citrix.com, jbeulich@suse.com
Subject: [PATCH v2 2/3] xen: add hypercall option to temporarily pin a vcpu
Date: Tue,  1 Mar 2016 10:02:12 +0100
Message-ID: <1456822933-25041-3-git-send-email-jgross@suse.com>
In-Reply-To: <1456822933-25041-1-git-send-email-jgross@suse.com>

Some hardware (e.g. Dell Studio 1555 laptops) requires SMIs to be
issued on physical cpu 0 only. Linux drivers like dcdbas or i8k try
to achieve this by pinning the running thread to cpu 0, but in Dom0
this is not enough: the vcpu must be pinned to physical cpu 0 via
Xen, too.

Add a stable hypercall option SCHEDOP_pin_temp to the sched_op
hypercall to achieve this. It takes a physical cpu number as
parameter. If pinning is possible (the calling domain has the
privilege to make the call and the cpu is available in the domain's
cpupool), the calling vcpu is pinned to the specified cpu. The old
cpu affinity is saved. To undo the temporary pinning, a cpu value of
-1 is specified. This restores the original cpu affinity for the
vcpu.
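
For illustration only (not part of this patch), here is a minimal
sketch of how a hardware domain kernel might use the new sub-op. The
HYPERVISOR_sched_op() wrapper, the header paths and the smi_on_cpu0()
helper are Linux guest-side conventions assumed for the example; the
calling thread is expected to already be pinned to a single vcpu, as
dcdbas and i8k do today:

    /* Hypothetical Dom0 usage sketch, not part of this patch. */
    #include <xen/interface/xen.h>
    #include <xen/interface/sched.h>

    static int smi_on_cpu0(void (*do_smi)(void))
    {
        struct sched_pin_temp pin = { .pcpu = 0 };
        int ret;

        /* Pin the current vcpu to physical cpu 0. */
        ret = HYPERVISOR_sched_op(SCHEDOP_pin_temp, &pin);
        if ( ret )
            return ret;    /* -EPERM, -EBUSY or -EINVAL */

        do_smi();          /* now running on physical cpu 0 */

        /* Undo the temporary pinning, restoring the old affinity. */
        pin.pcpu = -1;
        return HYPERVISOR_sched_op(SCHEDOP_pin_temp, &pin);
    }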

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2: - limit operation to hardware domain as suggested by Jan Beulich
    - some style issues corrected as requested by Jan Beulich
    - use fixed width types in interface as requested by Jan Beulich
    - add compat layer checking as requested by Jan Beulich
---
 xen/common/compat/schedule.c |  4 ++
 xen/common/schedule.c        | 92 +++++++++++++++++++++++++++++++++++++++++---
 xen/include/public/sched.h   | 17 ++++++++
 xen/include/xlat.lst         |  1 +
 4 files changed, 109 insertions(+), 5 deletions(-)

diff --git a/xen/common/compat/schedule.c b/xen/common/compat/schedule.c
index 812c550..73b0f01 100644
--- a/xen/common/compat/schedule.c
+++ b/xen/common/compat/schedule.c
@@ -10,6 +10,10 @@
 
 #define do_sched_op compat_sched_op
 
+#define xen_sched_pin_temp sched_pin_temp
+CHECK_sched_pin_temp;
+#undef xen_sched_pin_temp
+
 #define xen_sched_shutdown sched_shutdown
 CHECK_sched_shutdown;
 #undef xen_sched_shutdown
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index b0d4b18..653f852 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -271,6 +271,12 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
     struct scheduler *old_ops;
     void *old_domdata;
 
+    for_each_vcpu ( d, v )
+    {
+        if ( v->affinity_broken )
+            return -EBUSY;
+    }
+
     domdata = SCHED_OP(c->sched, alloc_domdata, d);
     if ( domdata == NULL )
         return -ENOMEM;
@@ -669,6 +675,14 @@ int cpu_disable_scheduler(unsigned int cpu)
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
             {
+                if ( v->affinity_broken )
+                {
+                    /* The vcpu is temporarily pinned, can't move it. */
+                    vcpu_schedule_unlock_irqrestore(lock, flags, v);
+                    ret = -EBUSY;
+                    break;
+                }
+
                 if (system_state == SYS_STATE_suspend)
                 {
                     cpumask_copy(v->cpu_hard_affinity_saved,
@@ -752,14 +766,20 @@ static int vcpu_set_affinity(
     struct vcpu *v, const cpumask_t *affinity, cpumask_t *which)
 {
     spinlock_t *lock;
+    int ret = 0;
 
     lock = vcpu_schedule_lock_irq(v);
 
-    cpumask_copy(which, affinity);
+    if ( v->affinity_broken )
+        ret = -EBUSY;
+    else
+    {
+        cpumask_copy(which, affinity);
 
-    /* Always ask the scheduler to re-evaluate placement
-     * when changing the affinity */
-    set_bit(_VPF_migrating, &v->pause_flags);
+        /* Always ask the scheduler to re-evaluate placement
+         * when changing the affinity */
+        set_bit(_VPF_migrating, &v->pause_flags);
+    }
 
     vcpu_schedule_unlock_irq(lock, v);
 
@@ -771,7 +791,7 @@ static int vcpu_set_affinity(
         vcpu_migrate(v);
     }
 
-    return 0;
+    return ret;
 }
 
 int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
@@ -978,6 +998,51 @@ void watchdog_domain_destroy(struct domain *d)
         kill_timer(&d->watchdog_timer[i]);
 }
 
+static long do_pin_temp(int cpu)
+{
+    struct vcpu *v = current;
+    spinlock_t *lock;
+    long ret = -EINVAL;
+
+    lock = vcpu_schedule_lock_irq(v);
+
+    if ( cpu < 0 )
+    {
+        if ( v->affinity_broken )
+        {
+            cpumask_copy(v->cpu_hard_affinity, v->cpu_hard_affinity_saved);
+            v->affinity_broken = 0;
+            set_bit(_VPF_migrating, &v->pause_flags);
+            ret = 0;
+        }
+    }
+    else if ( cpu < nr_cpu_ids )
+    {
+        if ( v->affinity_broken )
+            ret = -EBUSY;
+        else if ( cpumask_test_cpu(cpu, VCPU2ONLINE(v)) )
+        {
+            cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity);
+            v->affinity_broken = 1;
+            cpumask_copy(v->cpu_hard_affinity, cpumask_of(cpu));
+            set_bit(_VPF_migrating, &v->pause_flags);
+            ret = 0;
+        }
+    }
+
+    vcpu_schedule_unlock_irq(lock, v);
+
+    domain_update_node_affinity(v->domain);
+
+    if ( v->pause_flags & VPF_migrating )
+    {
+        vcpu_sleep_nosync(v);
+        vcpu_migrate(v);
+    }
+
+    return ret;
+}
+
 typedef long ret_t;
 
 #endif /* !COMPAT */
@@ -1087,6 +1152,23 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case SCHEDOP_pin_temp:
+    {
+        struct sched_pin_temp sched_pin_temp;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&sched_pin_temp, arg, 1) )
+            break;
+
+        ret = -EPERM;
+        if ( !is_hardware_domain(current->domain) )
+            break;
+
+        ret = do_pin_temp(sched_pin_temp.pcpu);
+
+        break;
+    }
+
     default:
         ret = -ENOSYS;
     }
diff --git a/xen/include/public/sched.h b/xen/include/public/sched.h
index 2219696..a0ce5a6 100644
--- a/xen/include/public/sched.h
+++ b/xen/include/public/sched.h
@@ -118,6 +118,17 @@
  * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
  */
 #define SCHEDOP_watchdog    6
+
+/*
+ * Temporarily pin the current vcpu to one physical cpu or undo that pinning.
+ * @arg == pointer to sched_pin_temp_t structure.
+ *
+ * Setting pcpu to -1 will undo a previous temporary pinning and restore the
+ * previous cpu affinity. The temporary aspect of the pinning isn't enforced
+ * by the hypervisor.
+ * This call is allowed for the hardware domain only.
+ */
+#define SCHEDOP_pin_temp    7
 /* ` } */
 
 struct sched_shutdown {
@@ -148,6 +159,12 @@ struct sched_watchdog {
 typedef struct sched_watchdog sched_watchdog_t;
 DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t);
 
+struct sched_pin_temp {
+    int32_t pcpu;
+};
+typedef struct sched_pin_temp sched_pin_temp_t;
+DEFINE_XEN_GUEST_HANDLE(sched_pin_temp_t);
+
 /*
  * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
  * software to determine the appropriate action. For the most part, Xen does
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index fda1137..52c7233 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -104,6 +104,7 @@
 ?	pmu_data			pmu.h
 ?	pmu_params			pmu.h
 !	sched_poll			sched.h
+?	sched_pin_temp			sched.h
 ?	sched_remote_shutdown		sched.h
 ?	sched_shutdown			sched.h
 ?	tmem_oid			tmem.h
-- 
2.6.2


Thread overview: 26+ messages
2016-03-01  9:02 [PATCH v2 0/3] add hypercall option to temporarily pin a vcpu Juergen Gross
2016-03-01  9:02 ` [PATCH v2 1/3] xen: silence affinity messages on suspend/resume Juergen Gross
2016-03-02 11:11   ` Dario Faggioli
2016-03-01  9:02 ` Juergen Gross [this message]
2016-03-01 11:27   ` [PATCH v2 2/3] xen: add hypercall option to temporarily pin a vcpu Jan Beulich
2016-03-01 11:55   ` David Vrabel
2016-03-01 11:58     ` Juergen Gross
2016-03-01 12:15       ` Dario Faggioli
2016-03-01 14:02         ` George Dunlap
     [not found]   ` <56D58ABF02000078000D7C46@suse.com>
2016-03-01 11:58     ` Juergen Gross
2016-03-01 15:52   ` George Dunlap
2016-03-01 15:55     ` George Dunlap
2016-03-01 16:11       ` Jan Beulich
2016-03-02  7:14     ` Juergen Gross
2016-03-02  9:27       ` Dario Faggioli
2016-03-02 11:19         ` Juergen Gross
2016-03-02 11:49           ` Dario Faggioli
2016-03-02 12:12             ` Juergen Gross
2016-03-02 15:34         ` Juergen Gross
2016-03-02 16:03           ` Dario Faggioli
2016-03-02 17:15             ` Juergen Gross
2016-03-02 17:21     ` Anshul Makkar
2016-03-03  5:31       ` Juergen Gross
2016-03-01  9:02 ` [PATCH v2 3/3] libxc: do some retries in xc_cpupool_removecpu() for EBUSY case Juergen Gross
2016-03-01 11:58   ` Wei Liu
2016-03-01 11:59     ` Juergen Gross
