From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xen.org
Cc: Marcus Granado <Marcus.Granado@eu.citrix.com>,
	Keir Fraser <keir@xen.org>,
	Ian Campbell <Ian.Campbell@citrix.com>,
	Li Yechen <lccycc123@gmail.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <Andrew.Cooper3@citrix.com>,
	Juergen Gross <juergen.gross@ts.fujitsu.com>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	Jan Beulich <JBeulich@suse.com>,
	Justin Weaver <jtweaver@hawaii.edu>,
	Daniel De Graaf <dgdegra@tycho.nsa.gov>,
	Matt Wilson <msw@amazon.com>,
	Elena Ufimtseva <ufimtseva@gmail.com>
Subject: [PATCH RESEND 08/12] xen: numa-sched: enable getting/specifying per-vcpu node-affinity
Date: Tue, 05 Nov 2013 15:35:31 +0100
Message-ID: <20131105143531.30446.76559.stgit@Solace>
In-Reply-To: <20131105142844.30446.78671.stgit@Solace>

via two new DOMCTLs: getvcpunodeaffinity and setvcpunodeaffinity.

They are very similar to XEN_DOMCTL_{get,set}vcpuaffinity, the
only difference being that they take a nodemap instead of a
cpumap.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
 tools/libxc/xc_domain.c     |    8 ++++---
 xen/common/domctl.c         |   47 ++++++++++++++++++++++++++++++++++++++-----
 xen/include/public/domctl.h |    8 +++++--
 xen/xsm/flask/hooks.c       |    2 ++
 4 files changed, 54 insertions(+), 11 deletions(-)
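
Not part of the patch itself: the actual libxc wrappers for these
DOMCTLs are introduced by patch 09/12 of this series. Purely as an
illustration of how a caller would drive the new interface, here is
a minimal sketch modelled on xc_vcpu_setaffinity() below; the name
xc_vcpu_setnodeaffinity() is hypothetical.

int xc_vcpu_setnodeaffinity(xc_interface *xch, uint32_t domid,
                            int vcpu, xc_nodemap_t nodemap)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
    int nodesize = xc_get_nodemap_size(xch);
    int ret = -1;

    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
    if ( local == NULL )
        return -1;

    domctl.cmd = XEN_DOMCTL_setvcpunodeaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;

    /* Same marshalling as the cpumap case, but with a nodemap. */
    memcpy(local, nodemap, nodesize);
    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;

    ret = do_domctl(xch, &domctl);

    xc_hypercall_buffer_free(xch, local);
    return ret;
}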

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 2cea6e3..f8825a3 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -219,9 +219,9 @@ int xc_vcpu_setaffinity(xc_interface *xch,
 
     memcpy(local, cpumap, cpusize);
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
 
-    domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+    domctl.u.vcpuaffinity.map.nr_bits = cpusize * 8;
 
     ret = do_domctl(xch, &domctl);
 
@@ -260,8 +260,8 @@ int xc_vcpu_getaffinity(xc_interface *xch,
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
-    domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
+    domctl.u.vcpuaffinity.map.nr_bits = cpusize * 8;
 
     ret = do_domctl(xch, &domctl);
 
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 870eef1..6bc3f26 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -584,7 +584,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     break;
 
     case XEN_DOMCTL_setvcpuaffinity:
-    case XEN_DOMCTL_getvcpuaffinity:
+    case XEN_DOMCTL_setvcpunodeaffinity:
     {
         struct vcpu *v;
 
@@ -600,8 +600,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         {
             cpumask_var_t new_affinity;
 
-            ret = xenctl_bitmap_to_cpumask(
-                &new_affinity, &op->u.vcpuaffinity.cpumap);
+            ret = xenctl_bitmap_to_cpumask(&new_affinity,
+                                           &op->u.vcpuaffinity.map);
             if ( !ret )
             {
                 ret = vcpu_set_affinity(v, new_affinity);
@@ -610,8 +610,45 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         }
         else
         {
-            ret = cpumask_to_xenctl_bitmap(
-                &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
+            nodemask_t new_affinity;
+
+            ret = xenctl_bitmap_to_nodemask(&new_affinity,
+                                            &op->u.vcpuaffinity.map);
+            if ( !ret )
+                ret = vcpu_set_node_affinity(v, &new_affinity);
+        }
+    }
+    break;
+
+    case XEN_DOMCTL_getvcpuaffinity:
+    case XEN_DOMCTL_getvcpunodeaffinity:
+    {
+        struct vcpu *v;
+
+        ret = -EINVAL;
+        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
+            break;
+
+        ret = -ESRCH;
+        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
+            break;
+
+        if ( op->cmd == XEN_DOMCTL_getvcpuaffinity )
+        {
+            ret = cpumask_to_xenctl_bitmap(&op->u.vcpuaffinity.map,
+                                           v->cpu_affinity);
+        }
+        else
+        {
+            nodemask_t affinity;
+            int cpu;
+
+            nodes_clear(affinity);
+            for_each_cpu ( cpu, v->node_affinity )
+                node_set(cpu_to_node(cpu), affinity);
+
+            ret = nodemask_to_xenctl_bitmap(&op->u.vcpuaffinity.map,
+                                            &affinity);
         }
     }
     break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index d4e479f..78b6d70 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -290,12 +290,14 @@ typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
 
 
-/* Get/set which physical cpus a vcpu can execute on. */
+/* Get/set which physical cpus a vcpu can, or prefers to, execute on. */
 /* XEN_DOMCTL_setvcpuaffinity */
 /* XEN_DOMCTL_getvcpuaffinity */
+/* XEN_DOMCTL_setvcpunodeaffinity */
+/* XEN_DOMCTL_getvcpunodeaffinity */
 struct xen_domctl_vcpuaffinity {
     uint32_t  vcpu;              /* IN */
-    struct xenctl_bitmap cpumap; /* IN/OUT */
+    struct xenctl_bitmap map;    /* IN/OUT */
 };
 typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
@@ -932,6 +934,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_setnodeaffinity               68
 #define XEN_DOMCTL_getnodeaffinity               69
 #define XEN_DOMCTL_set_max_evtchn                70
+#define XEN_DOMCTL_setvcpunodeaffinity           71
+#define XEN_DOMCTL_getvcpunodeaffinity           72
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index b1e2593..a9b2401 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -616,10 +616,12 @@ static int flask_domctl(struct domain *d, int cmd)
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__UNPAUSE);
 
     case XEN_DOMCTL_setvcpuaffinity:
+    case XEN_DOMCTL_setvcpunodeaffinity:
     case XEN_DOMCTL_setnodeaffinity:
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SETAFFINITY);
 
     case XEN_DOMCTL_getvcpuaffinity:
+    case XEN_DOMCTL_getvcpunodeaffinity:
     case XEN_DOMCTL_getnodeaffinity:
         return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__GETAFFINITY);
