From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xen.org
Cc: Marcus Granado <Marcus.Granado@eu.citrix.com>,
	Keir Fraser <keir@xen.org>,
	Ian Campbell <Ian.Campbell@citrix.com>,
	Li Yechen <lccycc123@gmail.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <Andrew.Cooper3@citrix.com>,
	Juergen Gross <juergen.gross@ts.fujitsu.com>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	Jan Beulich <JBeulich@suse.com>,
	Justin Weaver <jtweaver@hawaii.edu>, Matt Wilson <msw@amazon.com>,
	Elena Ufimtseva <ufimtseva@gmail.com>
Subject: [PATCH v3 10/14] libxc: get and set soft and hard affinity
Date: Mon, 18 Nov 2013 19:18:05 +0100
Message-ID: <20131118181805.31002.28692.stgit@Solace>
In-Reply-To: <20131118175544.31002.79574.stgit@Solace>

by using the new flag introduced in the parameters of the
DOMCTL_{get,set}_vcpuaffinity hypercall.

This happens by adding a new parameter (flags) to
xc_vcpu_setaffinity() and xc_vcpu_getaffinity(), so that the
caller can decide to set either the soft or hard affinity, or
even both.
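
As an illustration, here is a minimal (untested) sketch of the new
calling convention. It assumes xch is an open xc_interface handle,
domid a valid domain id, and cpumap an already filled xc_cpumap_t;
includes and most error handling are omitted:

    /* Set only the soft affinity of vcpu 0 (no effective map wanted) */
    if ( xc_vcpu_setaffinity(xch, domid, 0, cpumap,
                             XEN_VCPUAFFINITY_SOFT, NULL) )
        fprintf(stderr, "setting soft affinity failed\n");

    /* Read back only the hard affinity of vcpu 0 */
    if ( xc_vcpu_getaffinity(xch, domid, 0,
                             XEN_VCPUAFFINITY_HARD, cpumap) )
        fprintf(stderr, "getting hard affinity failed\n");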

In case of setting both hard and soft, they are set to the
same cpumap. xc_vcpu_setaffinity() also takes another new
parameter, for reporting back to the caller the actual
affinity the scheduler will use after a successful call.
In case of asking to get both hard and soft, what the caller
gets is the intersection of the two.
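
For example, a caller could (sketch again, untested, same assumptions
as above, plus the xc_cpumap_alloc() helper from xenctrl.h) set both
affinities at once and check whether the scheduler will actually use
the requested map:

    xc_cpumap_t ecpumap = xc_cpumap_alloc(xch);

    if ( xc_vcpu_setaffinity(xch, domid, 0, cpumap,
                             XEN_VCPUAFFINITY_HARD | XEN_VCPUAFFINITY_SOFT,
                             ecpumap) )
        fprintf(stderr, "setting affinity failed\n");
    else if ( memcmp(ecpumap, cpumap, xc_get_cpumap_size(xch)) )
        /* the effective map is the intersection with the cpupool's
         * cpus, so it can only be a subset of what was asked for */
        fprintf(stderr, "scheduler will use a subset of the map\n");

    free(ecpumap);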

In-tree callers are also fixed to cope with the new interface.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Changes from v2:
 * better cleanup logic in _vcpu_setaffinity() (regarding
   xc_hypercall_buffer_{alloc,free}()), as suggested during
   review;
 * make it more evident that DOMCTL_setvcpuaffinity has an out
   parameter, by calling it ecpumap_out, and improving the
   comment wrt that;
 * change the interface of xc_vcpu_[sg]etaffinity() so that
   they take the new parameters (flags and ecpumap_out), and
   fix the in-tree callers.
---
 tools/libxc/xc_domain.c             |   47 +++++++++++++++++++++--------------
 tools/libxc/xenctrl.h               |   44 ++++++++++++++++++++++++++++++++-
 tools/libxl/libxl.c                 |    7 ++++-
 tools/ocaml/libs/xc/xenctrl_stubs.c |    8 ++++--
 tools/python/xen/lowlevel/xc/xc.c   |    6 +++-
 5 files changed, 86 insertions(+), 26 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index f9ae4bf..bddf4e0 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -192,44 +192,52 @@ int xc_domain_node_getaffinity(xc_interface *xch,
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap)
+                        xc_cpumap_t cpumap,
+                        uint32_t flags,
+                        xc_cpumap_t ecpumap_out)
 {
     DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    DECLARE_HYPERCALL_BUFFER(uint8_t, cpumap_local);
+    DECLARE_HYPERCALL_BUFFER(uint8_t, ecpumap_local);
     int ret = -1;
     int cpusize;
 
     cpusize = xc_get_cpumap_size(xch);
-    if (!cpusize)
+    if ( !cpusize )
     {
         PERROR("Could not get number of cpus");
-        goto out;
+        return -1;
     }
 
-    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if ( local == NULL )
+    cpumap_local = xc_hypercall_buffer_alloc(xch, cpumap_local, cpusize);
+    ecpumap_local = xc_hypercall_buffer_alloc(xch, ecpumap_local, cpusize);
+    if ( cpumap_local == NULL || ecpumap_local == NULL )
     {
-        PERROR("Could not allocate memory for setvcpuaffinity domctl hypercall");
+        PERROR("Could not allocate hcall buffers for DOMCTL_setvcpuaffinity");
         goto out;
     }
 
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
-    /* Soft affinity is there, but not used anywhere for now, so... */
-    domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD;
-
-    memcpy(local, cpumap, cpusize);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    domctl.u.vcpuaffinity.flags = flags;
 
+    memcpy(cpumap_local, cpumap, cpusize);
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, cpumap_local);
     domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
 
+    set_xen_guest_handle(domctl.u.vcpuaffinity.eff_cpumap.bitmap,
+                         ecpumap_local);
+    domctl.u.vcpuaffinity.eff_cpumap.nr_bits = cpusize * 8;
+
     ret = do_domctl(xch, &domctl);
 
-    xc_hypercall_buffer_free(xch, local);
+    if ( ret == 0 && ecpumap_out != NULL )
+        memcpy(ecpumap_out, ecpumap_local, cpusize);
 
  out:
+    xc_hypercall_buffer_free(xch, cpumap_local);
+    xc_hypercall_buffer_free(xch, ecpumap_local);
     return ret;
 }
 
@@ -237,6 +245,7 @@ int xc_vcpu_setaffinity(xc_interface *xch,
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
+                        uint32_t flags,
                         xc_cpumap_t cpumap)
 {
     DECLARE_DOMCTL;
@@ -245,22 +254,23 @@ int xc_vcpu_getaffinity(xc_interface *xch,
     int cpusize;
 
     cpusize = xc_get_cpumap_size(xch);
-    if (!cpusize)
+    if ( !cpusize )
     {
         PERROR("Could not get number of cpus");
-        goto out;
+        return -1;
     }
 
     local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if (local == NULL)
+    if ( local == NULL )
     {
         PERROR("Could not allocate memory for getvcpuaffinity domctl hypercall");
-        goto out;
+        return -1;
     }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.flags = flags;
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
     domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
@@ -270,7 +280,6 @@ int xc_vcpu_getaffinity(xc_interface *xch,
     memcpy(cpumap, local, cpusize);
 
     xc_hypercall_buffer_free(xch, local);
-out:
     return ret;
 }
 
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 4ac6b8a..a97ed67 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -579,13 +579,55 @@ int xc_domain_node_getaffinity(xc_interface *xch,
                                uint32_t domind,
                                xc_nodemap_t nodemap);
 
+/**
+ * This function specifies the CPU affinity for a vcpu.
+ *
+ * There are two kinds of affinity. Soft affinity is the set of pcpus a
+ * vcpu prefers to run on; hard affinity is the set of pcpus a vcpu is
+ * allowed to run on. If flags contains *only* XEN_VCPUAFFINITY_SOFT, it
+ * is the soft affinity that is set. If flags contains *only*
+ * XEN_VCPUAFFINITY_HARD, it is the hard affinity that is set. If flags
+ * contains *both*, both are set to the same value, provided in cpumap.
+ *
+ * The function also returns the effective affinity, via the ecpumap_out
+ * parameter. Effective affinity is the intersection of soft affinity,
+ * hard affinity and the set of cpus of the cpupool the domain belongs to.
+ * It is basically what the Xen scheduler will actually use. Reporting it
+ * back allows the caller to check whether that matches, or is at least
+ * good enough for, their purposes.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap the new affinity map (hard, soft or both) one wants to set
+ * @param flags specify whether to set hard affinity, soft affinity or both
+ * @param ecpumap_out where the effective affinity for the vcpu is returned
+ */
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap);
+                        xc_cpumap_t cpumap,
+                        uint32_t flags,
+                        xc_cpumap_t ecpumap_out);
+
+/**
+ * This function retrieves hard or soft CPU affinity (or their intersection)
+ * for a vcpu, depending on flags.
+ *
+ * Soft affinity is returned if *only* XEN_VCPUAFFINITY_SOFT is set in flags.
+ * Hard affinity is returned if *only* XEN_VCPUAFFINITY_HARD is set in flags.
+ * If both are set, what is returned is the intersection of the two.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param flags specify whether to get hard affinity, soft affinity or both
+ * @param cpumap where the desired affinity is returned
+ */
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
+                        uint32_t flags,
                         xc_cpumap_t cpumap);
 
 
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index d3ab65e..d0db3f0 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4208,7 +4208,9 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info");
             return NULL;
         }
-        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) {
+        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
+                                XEN_VCPUAFFINITY_HARD,
+                                ptr->cpumap.map) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
@@ -4225,7 +4227,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
                            libxl_bitmap *cpumap)
 {
-    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
+    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map,
+                            XEN_VCPUAFFINITY_HARD, NULL)) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
         return ERROR_FAIL;
     }
diff --git a/tools/ocaml/libs/xc/xenctrl_stubs.c b/tools/ocaml/libs/xc/xenctrl_stubs.c
index f5cf0ed..30327d4 100644
--- a/tools/ocaml/libs/xc/xenctrl_stubs.c
+++ b/tools/ocaml/libs/xc/xenctrl_stubs.c
@@ -438,7 +438,9 @@ CAMLprim value stub_xc_vcpu_setaffinity(value xch, value domid,
 			c_cpumap[i/8] |= 1 << (i&7);
 	}
 	retval = xc_vcpu_setaffinity(_H(xch), _D(domid),
-	                             Int_val(vcpu), c_cpumap);
+				     Int_val(vcpu), c_cpumap,
+				     XEN_VCPUAFFINITY_HARD,
+				     NULL);
 	free(c_cpumap);
 
 	if (retval < 0)
@@ -460,7 +462,9 @@ CAMLprim value stub_xc_vcpu_getaffinity(value xch, value domid,
 		failwith_xc(_H(xch));
 
 	retval = xc_vcpu_getaffinity(_H(xch), _D(domid),
-	                             Int_val(vcpu), c_cpumap);
+				     Int_val(vcpu),
+				     XEN_VCPUAFFINITY_HARD,
+				     c_cpumap);
 	if (retval < 0) {
 		free(c_cpumap);
 		failwith_xc(_H(xch));
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 2625fc4..9348ce6 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -256,7 +256,8 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap,
+                             XEN_VCPUAFFINITY_HARD, NULL) != 0 )
     {
         free(cpumap);
         return pyxc_error_to_exception(self->xc_handle);
@@ -403,7 +404,8 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
     if(cpumap == NULL)
         return pyxc_error_to_exception(self->xc_handle);
 
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap);
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu,
+                             XEN_VCPUAFFINITY_HARD, cpumap);
     if ( rc < 0 )
     {
         free(cpumap);
