From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xen.org
Cc: Marcus Granado <Marcus.Granado@eu.citrix.com>,
Keir Fraser <keir@xen.org>,
Ian Campbell <Ian.Campbell@citrix.com>,
Li Yechen <lccycc123@gmail.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <Andrew.Cooper3@citrix.com>,
Juergen Gross <juergen.gross@ts.fujitsu.com>,
Ian Jackson <Ian.Jackson@eu.citrix.com>,
Jan Beulich <JBeulich@suse.com>,
Justin Weaver <jtweaver@hawaii.edu>,
Daniel De Graaf <dgdegra@tycho.nsa.gov>,
Matt Wilson <msw@amazon.com>,
Elena Ufimtseva <ufimtseva@gmail.com>
Subject: [PATCH 11/15] xen: numa-sched: enable getting/specifying per-vcpu node-affinity
Date: Thu, 03 Oct 2013 19:47:00 +0200
Message-ID: <20131003174700.28472.5776.stgit@Solace>
In-Reply-To: <20131003174413.28472.8989.stgit@Solace>
This is done via two new DOMCTLs: getvcpunodeaffinity and
setvcpunodeaffinity. They are very similar to
XEN_DOMCTL_{get,set}vcpuaffinity, the only difference being that
they take a nodemap instead of a cpumap.
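For illustration, here is a minimal sketch of how a toolstack caller
could drive the new set operation. It is modelled on the existing
xc_vcpu_setaffinity(), and everything below (the function name, taking
an xc_nodemap_t plus an explicit size) is hypothetical: the actual
libxc wrappers are only introduced later in the series, in patch 12/15.

/* Hypothetical sketch, modelled on xc_vcpu_setaffinity(); the real
 * libxc wrapper arrives in patch 12/15 and may well differ. */
static int demo_vcpu_setnodeaffinity(xc_interface *xch, uint32_t domid,
                                     int vcpu, xc_nodemap_t nodemap,
                                     int nodesize)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
    int ret = -1;

    local = xc_hypercall_buffer_alloc(xch, local, nodesize);
    if ( local == NULL )
        goto out;

    domctl.cmd = XEN_DOMCTL_setvcpunodeaffinity;
    domctl.domain = (domid_t)domid;
    domctl.u.vcpuaffinity.vcpu = vcpu;

    /* The (renamed) map field now carries a node bitmap, not a cpu one */
    memcpy(local, nodemap, nodesize);
    set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
    domctl.u.vcpuaffinity.map.nr_bits = nodesize * 8;

    ret = do_domctl(xch, &domctl);

    xc_hypercall_buffer_free(xch, local);
 out:
    return ret;
}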
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
tools/libxc/xc_domain.c | 8 ++++---
xen/common/domctl.c | 47 ++++++++++++++++++++++++++++++++++++++-----
xen/include/public/domctl.h | 8 +++++--
xen/xsm/flask/hooks.c | 2 ++
4 files changed, 54 insertions(+), 11 deletions(-)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 81316d3..b36c2ad 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -219,9 +219,9 @@ int xc_vcpu_setaffinity(xc_interface *xch,
memcpy(local, cpumap, cpusize);
- set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+ set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
- domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+ domctl.u.vcpuaffinity.map.nr_bits = cpusize * 8;
ret = do_domctl(xch, &domctl);
@@ -260,8 +260,8 @@ int xc_vcpu_getaffinity(xc_interface *xch,
domctl.domain = (domid_t)domid;
domctl.u.vcpuaffinity.vcpu = vcpu;
- set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
- domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
+ set_xen_guest_handle(domctl.u.vcpuaffinity.map.bitmap, local);
+ domctl.u.vcpuaffinity.map.nr_bits = cpusize * 8;
ret = do_domctl(xch, &domctl);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 9760d50..7770d30 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -584,7 +584,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
break;
case XEN_DOMCTL_setvcpuaffinity:
- case XEN_DOMCTL_getvcpuaffinity:
+ case XEN_DOMCTL_setvcpunodeaffinity:
{
struct vcpu *v;
@@ -600,8 +600,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
cpumask_var_t new_affinity;
- ret = xenctl_bitmap_to_cpumask(
- &new_affinity, &op->u.vcpuaffinity.cpumap);
+ ret = xenctl_bitmap_to_cpumask(&new_affinity,
+ &op->u.vcpuaffinity.map);
if ( !ret )
{
ret = vcpu_set_affinity(v, new_affinity);
@@ -610,8 +610,45 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
}
else
{
- ret = cpumask_to_xenctl_bitmap(
- &op->u.vcpuaffinity.cpumap, v->cpu_affinity);
+ nodemask_t new_affinity;
+
+ ret = xenctl_bitmap_to_nodemask(&new_affinity,
+ &op->u.vcpuaffinity.map);
+ if ( !ret )
+ ret = vcpu_set_node_affinity(v, &new_affinity);
+ }
+ }
+ break;
+
+ case XEN_DOMCTL_getvcpuaffinity:
+ case XEN_DOMCTL_getvcpunodeaffinity:
+ {
+ struct vcpu *v;
+
+ ret = -EINVAL;
+ if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
+ break;
+
+ ret = -ESRCH;
+ if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
+ break;
+
+ if ( op->cmd == XEN_DOMCTL_getvcpuaffinity )
+ {
+ ret = cpumask_to_xenctl_bitmap(&op->u.vcpuaffinity.map,
+ v->cpu_affinity);
+ }
+ else
+ {
+ nodemask_t affinity;
+ int cpu;
+
+ nodes_clear(affinity);
+ for_each_cpu ( cpu, v->node_affinity )
+ node_set(cpu_to_node(cpu), affinity);
+
+ ret = nodemask_to_xenctl_bitmap(&op->u.vcpuaffinity.map,
+ &affinity);
}
}
break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4c5b2bb..07d43f2 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -290,12 +290,14 @@ typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
-/* Get/set which physical cpus a vcpu can execute on. */
+/* Get/set which physical cpus a vcpu can, or prefers to, execute on. */
/* XEN_DOMCTL_setvcpuaffinity */
/* XEN_DOMCTL_getvcpuaffinity */
+/* XEN_DOMCTL_setvcpunodeaffinity */
+/* XEN_DOMCTL_getvcpunodeaffinity */
struct xen_domctl_vcpuaffinity {
uint32_t vcpu; /* IN */
- struct xenctl_bitmap cpumap; /* IN/OUT */
+ struct xenctl_bitmap map; /* IN/OUT */
};
typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
@@ -920,6 +922,8 @@ struct xen_domctl {
#define XEN_DOMCTL_set_broken_page_p2m 67
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
+#define XEN_DOMCTL_setvcpunodeaffinity 70
+#define XEN_DOMCTL_getvcpunodeaffinity 71
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index fa0589a..d7cfeaf 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -616,10 +616,12 @@ static int flask_domctl(struct domain *d, int cmd)
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__UNPAUSE);
case XEN_DOMCTL_setvcpuaffinity:
+ case XEN_DOMCTL_setvcpunodeaffinity:
case XEN_DOMCTL_setnodeaffinity:
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SETAFFINITY);
case XEN_DOMCTL_getvcpuaffinity:
+ case XEN_DOMCTL_getvcpunodeaffinity:
case XEN_DOMCTL_getnodeaffinity:
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__GETAFFINITY);
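As a side note, since the per-vcpu node-affinity is tracked as a
cpumask, reading it back (the getvcpunodeaffinity path above) means
mapping each cpu in the mask to its node. Below is a standalone,
simplified illustration of that round-trip, not part of the patch; the
8-cpu/2-node topology and the helper name are made up.

#include <stdio.h>

/* Made-up topology: cpus 0-3 on node 0, cpus 4-7 on node 1 */
static int cpu_to_node_demo(int cpu)
{
    return cpu < 4 ? 0 : 1;
}

int main(void)
{
    unsigned long node_affinity_cpumask = 0xf0; /* cpus 4-7 */
    unsigned long nodemask = 0;
    int cpu;

    /* Same loop shape as the hypervisor code above */
    for ( cpu = 0; cpu < 8; cpu++ )
        if ( node_affinity_cpumask & (1UL << cpu) )
            nodemask |= 1UL << cpu_to_node_demo(cpu);

    printf("nodemask = %#lx\n", nodemask); /* 0x2, i.e. node 1 only */
    return 0;
}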
Thread overview: 34+ messages
2013-10-03 17:45 [PATCH 00/15] Implement per-vcpu NUMA node-affinity for credit1 Dario Faggioli
2013-10-03 17:45 ` [PATCH 01/15] xl: update the manpage about "cpus=" and NUMA node-affinity Dario Faggioli
2013-10-10 11:43 ` Ian Campbell
2013-10-10 13:55 ` Dario Faggioli
2013-10-03 17:45 ` [PATCH 02/15] xl: fix a typo in main_vcpulist() Dario Faggioli
2013-10-10 10:29 ` Ian Campbell
2013-10-10 11:43 ` Ian Campbell
2013-10-10 13:54 ` Dario Faggioli
2013-10-10 13:57 ` Ian Campbell
2013-10-03 17:45 ` [PATCH 03/15] xen: numa-sched: leave node-affinity alone if not in "auto" mode Dario Faggioli
2013-11-05 14:21 ` George Dunlap
2013-11-05 14:37 ` Jan Beulich
2013-11-05 14:45 ` George Dunlap
2013-11-05 14:49 ` Dario Faggioli
2013-10-03 17:46 ` [PATCH 04/15] libxl: introduce libxl_node_to_cpumap Dario Faggioli
2013-10-10 11:44 ` Ian Campbell
2013-10-03 17:46 ` [PATCH 05/15] xl: allow for node-wise specification of vcpu pinning Dario Faggioli
2013-10-03 17:46 ` [PATCH 06/15] xl: implement and enable dryrun mode for `xl vcpu-pin' Dario Faggioli
2013-10-03 17:46 ` [PATCH 07/15] xl: test script for the cpumap parser (for vCPU pinning) Dario Faggioli
2013-10-10 10:32 ` Ian Campbell
2013-10-10 14:35 ` Dario Faggioli
2013-10-14 16:42 ` Ian Jackson
2013-10-14 17:00 ` Dario Faggioli
2013-10-03 17:46 ` [PATCH 08/15] xen: numa-sched: make space for per-vcpu node-affinity Dario Faggioli
2013-10-03 17:46 ` [PATCH 09/15] xen: numa-sched: domain node-affinity always comes from vcpu node-affinity Dario Faggioli
2013-10-03 17:46 ` [PATCH 10/15] xen: numa-sched: use per-vcpu node-affinity for actual scheduling Dario Faggioli
2013-10-03 17:47 ` Dario Faggioli [this message]
2013-10-03 17:47 ` [PATCH 12/15] libxc: numa-sched: enable getting/specifying per-vcpu node-affinity Dario Faggioli
2013-10-03 17:47 ` [PATCH 13/15] libxl: " Dario Faggioli
2013-10-03 17:47 ` [PATCH 14/15] xl: " Dario Faggioli
2013-10-03 17:47 ` [PATCH 15/15] xl: numa-sched: enable specifying node-affinity in VM config file Dario Faggioli
2013-11-05 11:29 ` [PATCH 00/15] Implement per-vcpu NUMA node-affinity for credit1 Dario Faggioli
2013-11-05 11:32 ` George Dunlap
2013-11-05 14:05 ` Dario Faggioli