xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Chong Li <lichong659@gmail.com>
To: xen-devel@lists.xen.org
Cc: Chong Li <chong.li@wustl.edu>, Sisu Xi <xisisu@gmail.com>,
	george.dunlap@eu.citrix.com, dario.faggioli@citrix.com,
	Meng Xu <mengxu@cis.upenn.edu>,
	jbeulich@suse.com, lichong659@gmail.com, dgolomb@seas.upenn.edu
Subject: [PATCH v3 for Xen 4.6 1/4] xen: enable per-VCPU parameter settings for RTDS scheduler
Date: Sun, 28 Jun 2015 21:44:56 -0500	[thread overview]
Message-ID: <1435545899-22751-2-git-send-email-chong.li@wustl.edu> (raw)
In-Reply-To: <1435545899-22751-1-git-send-email-chong.li@wustl.edu>

Add two sub-commands (XEN_DOMCTL_SCHEDOP_getvcpuinfo/putvcpuinfo) to the
XEN_DOMCTL_scheduler_op domctl, to get/set a domain's per-VCPU scheduling parameters.

Changes since PATCH v2:

1) Change struct xen_domctl_scheduler_op, for transferring per-VCPU parameters
between libxc and the hypervisor.

2) The handler of XEN_DOMCTL_SCHEDOP_getinfo now simply returns the default budget and period values of the RTDS scheduler.

3) The handler of XEN_DOMCTL_SCHEDOP_getvcpuinfo can now return the parameters of an arbitrary subset of the VCPUs of a specific domain.

Signed-off-by: Chong Li <chong.li@wustl.edu>
Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>
Signed-off-by: Sisu Xi <xisisu@gmail.com>

---
CC: <dario.faggioli@citrix.com>
CC: <george.dunlap@eu.citrix.com>
CC: <dgolomb@seas.upenn.edu>
CC: <mengxu@cis.upenn.edu>
CC: <jbeulich@suse.com>
CC: <lichong659@gmail.com>
---
 xen/common/Makefile         |  1 -
 xen/common/domctl.c         |  3 ++
 xen/common/sched_credit.c   | 14 ++++----
 xen/common/sched_credit2.c  |  6 ++--
 xen/common/sched_rt.c       | 80 +++++++++++++++++++++++++++++++++++++++++----
 xen/common/schedule.c       |  5 +--
 xen/include/public/domctl.h | 64 ++++++++++++++++++++++++++----------
 7 files changed, 136 insertions(+), 37 deletions(-)

diff --git a/xen/common/Makefile b/xen/common/Makefile
index 1cddebc..3fdf931 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -31,7 +31,6 @@ obj-y += rbtree.o
 obj-y += rcupdate.o
 obj-y += sched_credit.o
 obj-y += sched_credit2.o
-obj-y += sched_sedf.o
 obj-y += sched_arinc653.o
 obj-y += sched_rt.o
 obj-y += schedule.o
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 2a2d203..349f68b 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -839,6 +839,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
 
     case XEN_DOMCTL_scheduler_op:
         ret = sched_adjust(d, &op->u.scheduler_op);
+        if ( ret == -ERESTART )
+            ret = hypercall_create_continuation(
+                __HYPERVISOR_domctl, "h", u_domctl);
         copyback = 1;
         break;
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 953ecb0..43b086b 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1039,25 +1039,25 @@ csched_dom_cntl(
 
     if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
     {
-        op->u.credit.weight = sdom->weight;
-        op->u.credit.cap = sdom->cap;
+        op->u.d.credit.weight = sdom->weight;
+        op->u.d.credit.cap = sdom->cap;
     }
     else
     {
         ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
 
-        if ( op->u.credit.weight != 0 )
+        if ( op->u.d.credit.weight != 0 )
         {
             if ( !list_empty(&sdom->active_sdom_elem) )
             {
                 prv->weight -= sdom->weight * sdom->active_vcpu_count;
-                prv->weight += op->u.credit.weight * sdom->active_vcpu_count;
+                prv->weight += op->u.d.credit.weight * sdom->active_vcpu_count;
             }
-            sdom->weight = op->u.credit.weight;
+            sdom->weight = op->u.d.credit.weight;
         }
 
-        if ( op->u.credit.cap != (uint16_t)~0U )
-            sdom->cap = op->u.credit.cap;
+        if ( op->u.d.credit.cap != (uint16_t)~0U )
+            sdom->cap = op->u.d.credit.cap;
 
     }
 
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 75e0321..8992423 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1438,20 +1438,20 @@ csched2_dom_cntl(
 
     if ( op->cmd == XEN_DOMCTL_SCHEDOP_getinfo )
     {
-        op->u.credit2.weight = sdom->weight;
+        op->u.d.credit2.weight = sdom->weight;
     }
     else
     {
         ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
 
-        if ( op->u.credit2.weight != 0 )
+        if ( op->u.d.credit2.weight != 0 )
         {
             struct list_head *iter;
             int old_weight;
 
             old_weight = sdom->weight;
 
-            sdom->weight = op->u.credit2.weight;
+            sdom->weight = op->u.d.credit2.weight;
 
             /* Update weights for vcpus, and max_weight for runqueues on which they reside */
             list_for_each ( iter, &sdom->vcpu )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 4372486..8d1740d 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -1137,18 +1137,19 @@ rt_dom_cntl(
     struct list_head *iter;
     unsigned long flags;
     int rc = 0;
+    xen_domctl_schedparam_vcpu_t local_sched;
+    unsigned int index;
 
     switch ( op->cmd )
     {
     case XEN_DOMCTL_SCHEDOP_getinfo:
         spin_lock_irqsave(&prv->lock, flags);
-        svc = list_entry(sdom->vcpu.next, struct rt_vcpu, sdom_elem);
-        op->u.rtds.period = svc->period / MICROSECS(1); /* transfer to us */
-        op->u.rtds.budget = svc->budget / MICROSECS(1);
+        op->u.d.rtds.period = RTDS_DEFAULT_PERIOD / MICROSECS(1); /* transfer to us */
+        op->u.d.rtds.budget = RTDS_DEFAULT_BUDGET / MICROSECS(1);
         spin_unlock_irqrestore(&prv->lock, flags);
         break;
     case XEN_DOMCTL_SCHEDOP_putinfo:
-        if ( op->u.rtds.period == 0 || op->u.rtds.budget == 0 )
+        if ( op->u.d.rtds.period == 0 || op->u.d.rtds.budget == 0 )
         {
             rc = -EINVAL;
             break;
@@ -1157,8 +1158,75 @@ rt_dom_cntl(
         list_for_each( iter, &sdom->vcpu )
         {
             struct rt_vcpu * svc = list_entry(iter, struct rt_vcpu, sdom_elem);
-            svc->period = MICROSECS(op->u.rtds.period); /* transfer to nanosec */
-            svc->budget = MICROSECS(op->u.rtds.budget);
+            svc->period = MICROSECS(op->u.d.rtds.period); /* transfer to nanosec */
+            svc->budget = MICROSECS(op->u.d.rtds.budget);
+        }
+        spin_unlock_irqrestore(&prv->lock, flags);
+        break;
+    case XEN_DOMCTL_SCHEDOP_getvcpuinfo:
+        spin_lock_irqsave(&prv->lock, flags);
+        for( index = 0; index < op->u.v.nr_vcpus; index++ )
+        {
+            if ( copy_from_guest_offset(&local_sched,
+                    op->u.v.vcpus, index, 1) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+            if ( local_sched.vcpuid >= d->max_vcpus
+                    || d->vcpu[local_sched.vcpuid] == NULL )
+            {
+                rc = -EINVAL;
+                break;
+            }
+            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+
+            local_sched.vcpuid = svc->vcpu->vcpu_id;
+            local_sched.s.rtds.budget = svc->budget / MICROSECS(1);
+            local_sched.s.rtds.period = svc->period / MICROSECS(1);
+            if( index >= op->u.v.nr_vcpus ) /* not enough guest buffer*/
+            {
+                rc = -ENOBUFS;
+                break;
+            }
+            if ( copy_to_guest_offset(op->u.v.vcpus, index,
+                    &local_sched, 1) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+            if( hypercall_preempt_check() )
+            {
+                rc = -ERESTART;
+                break;
+            }
+        }
+        spin_unlock_irqrestore(&prv->lock, flags);
+        break;
+    case XEN_DOMCTL_SCHEDOP_putvcpuinfo:
+        spin_lock_irqsave(&prv->lock, flags);
+        for( index = 0; index < op->u.v.nr_vcpus; index++ )
+        {
+            if ( copy_from_guest_offset(&local_sched,
+                    op->u.v.vcpus, index, 1) )
+            {
+                rc = -EFAULT;
+                break;
+            }
+            if ( local_sched.vcpuid >= d->max_vcpus
+                    || d->vcpu[local_sched.vcpuid] == NULL )
+            {
+                rc = -EINVAL;
+                break;
+            }
+            svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
+            svc->period = MICROSECS(local_sched.s.rtds.period);
+            svc->budget = MICROSECS(local_sched.s.rtds.budget);
+            if( hypercall_preempt_check() )
+            {
+                rc = -ERESTART;
+                break;
+            }
         }
         spin_unlock_irqrestore(&prv->lock, flags);
         break;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index ecf1545..159425e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -65,7 +65,6 @@ DEFINE_PER_CPU(struct schedule_data, schedule_data);
 DEFINE_PER_CPU(struct scheduler *, scheduler);
 
 static const struct scheduler *schedulers[] = {
-    &sched_sedf_def,
     &sched_credit_def,
     &sched_credit2_def,
     &sched_arinc653_def,
@@ -1054,7 +1053,9 @@ long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
 
     if ( (op->sched_id != DOM2OP(d)->sched_id) ||
          ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
-          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
+          (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) &&
+          (op->cmd != XEN_DOMCTL_SCHEDOP_putvcpuinfo) &&
+          (op->cmd != XEN_DOMCTL_SCHEDOP_getvcpuinfo)) )
         return -EINVAL;
 
     /* NB: the pluggable scheduler code needs to take care
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index bc45ea5..67a5626 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -330,31 +330,59 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
 #define XEN_SCHEDULER_ARINC653 7
 #define XEN_SCHEDULER_RTDS     8
 
+typedef struct xen_domctl_sched_sedf {
+            uint64_aligned_t period;
+            uint64_aligned_t slice;
+            uint64_aligned_t latency;
+            uint32_t extratime;
+            uint32_t weight;
+} xen_domctl_sched_sedf_t;
+
+typedef struct xen_domctl_sched_credit {
+    uint16_t weight;
+    uint16_t cap;
+} xen_domctl_sched_credit_t;
+
+typedef struct xen_domctl_sched_credit2 {
+    uint16_t weight;
+} xen_domctl_sched_credit2_t;
+
+typedef struct xen_domctl_sched_rtds {
+    uint32_t period;
+    uint32_t budget;
+} xen_domctl_sched_rtds_t;
+
+typedef union xen_domctl_schedparam {
+    xen_domctl_sched_sedf_t sedf;
+    xen_domctl_sched_credit_t credit;
+    xen_domctl_sched_credit2_t credit2;
+    xen_domctl_sched_rtds_t rtds;
+} xen_domctl_schedparam_t;
+
+typedef struct xen_domctl_schedparam_vcpu {
+    union {
+        xen_domctl_sched_credit_t credit;
+        xen_domctl_sched_credit2_t credit2;
+        xen_domctl_sched_rtds_t rtds;
+    } s;
+    uint16_t vcpuid;
+} xen_domctl_schedparam_vcpu_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_schedparam_vcpu_t);
+
 /* Set or get info? */
 #define XEN_DOMCTL_SCHEDOP_putinfo 0
 #define XEN_DOMCTL_SCHEDOP_getinfo 1
+#define XEN_DOMCTL_SCHEDOP_putvcpuinfo 2
+#define XEN_DOMCTL_SCHEDOP_getvcpuinfo 3
 struct xen_domctl_scheduler_op {
     uint32_t sched_id;  /* XEN_SCHEDULER_* */
     uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
     union {
-        struct xen_domctl_sched_sedf {
-            uint64_aligned_t period;
-            uint64_aligned_t slice;
-            uint64_aligned_t latency;
-            uint32_t extratime;
-            uint32_t weight;
-        } sedf;
-        struct xen_domctl_sched_credit {
-            uint16_t weight;
-            uint16_t cap;
-        } credit;
-        struct xen_domctl_sched_credit2 {
-            uint16_t weight;
-        } credit2;
-        struct xen_domctl_sched_rtds {
-            uint32_t period;
-            uint32_t budget;
-        } rtds;
+        xen_domctl_schedparam_t d;
+        struct {
+            XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
+            uint16_t nr_vcpus;
+        } v;
     } u;
 };
 typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
-- 
1.9.1

  reply	other threads:[~2015-06-29  2:44 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-06-29  2:44 [PATCH v3 for Xen 4.6 0/4] Enable per-VCPU parameter settings for RTDS scheduler Chong Li
2015-06-29  2:44 ` Chong Li [this message]
2015-07-07  8:59   ` [PATCH v3 for Xen 4.6 1/4] xen: enable " Jan Beulich
2015-07-07 14:39     ` Dario Faggioli
2015-07-08  6:06       ` Meng Xu
2015-07-08  8:33         ` Dario Faggioli
2015-07-09  1:16           ` Meng Xu
2015-07-10  9:51             ` Dario Faggioli
2015-07-07 14:55     ` Dario Faggioli
2015-07-07 16:09       ` Jan Beulich
2015-07-07 15:33     ` Chong Li
2015-07-07 15:41       ` Jan Beulich
2015-07-07 15:46       ` Dario Faggioli
2015-07-07 14:51   ` Dario Faggioli
2015-06-29  2:44 ` [PATCH v3 for Xen 4.6 2/4] libxc: " Chong Li
2015-06-30 12:22   ` Ian Campbell
2015-06-30 15:18     ` Chong Li
2015-06-30 15:32       ` Ian Campbell
2015-06-30 15:57         ` Chong Li
2015-06-30 16:04           ` Ian Campbell
2015-06-29  2:44 ` [PATCH v3 for Xen 4.6 3/4] libxl: " Chong Li
2015-06-30 12:26   ` Ian Campbell
2015-06-30 15:42     ` Chong Li
2015-06-30 15:57       ` Ian Campbell
2015-06-30 16:10         ` Chong Li
2015-06-30 16:19           ` Ian Campbell
2015-06-30 16:53             ` Chong Li
2015-07-01  0:54             ` Meng Xu
2015-07-01  8:48               ` Ian Campbell
2015-07-01 12:50                 ` Dario Faggioli
2015-07-01 16:59                   ` Chong Li
2015-07-07 16:23   ` Dario Faggioli
2015-07-08 14:35     ` Chong Li
2015-07-08 14:45       ` Dario Faggioli
2015-06-29  2:44 ` [PATCH v3 for Xen 4.6 4/4] xl: " Chong Li
2015-07-07 15:34   ` Dario Faggioli
2015-07-07 16:01     ` Chong Li
2015-07-07 15:16 ` [PATCH v3 for Xen 4.6 0/4] Enable " Dario Faggioli
2015-07-07 16:11   ` Chong Li

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1435545899-22751-2-git-send-email-chong.li@wustl.edu \
    --to=lichong659@gmail.com \
    --cc=chong.li@wustl.edu \
    --cc=dario.faggioli@citrix.com \
    --cc=dgolomb@seas.upenn.edu \
    --cc=george.dunlap@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=mengxu@cis.upenn.edu \
    --cc=xen-devel@lists.xen.org \
    --cc=xisisu@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).