From: Wei Liu <wei.liu2@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Wei Liu <wei.liu2@citrix.com>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	Ian Campbell <ian.campbell@citrix.com>
Subject: [PATCH for 4.6 v3 1/3] libxc: introduce xc_domain_getvnuma
Date: Thu, 10 Sep 2015 15:50:49 +0100
Message-ID: <1441896651-24735-2-git-send-email-wei.liu2@citrix.com>
In-Reply-To: <1441896651-24735-1-git-send-email-wei.liu2@citrix.com>

Introduce xc_domain_getvnuma, a simple wrapper for the
XENMEM_get_vnumainfo hypercall.
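
The nr_* parameters are IN/OUT: on entry they carry the sizes of the
caller's arrays, and on return the hypervisor's actual counts. Below is
a hedged sketch of the expected calling pattern (not part of this
patch): the probing call with NULL arrays and zero counts, the
assumption that a failed call writes back the required counts, and the
dump_vnuma name are all illustrative.

    #include <stdlib.h>
    #include <xenctrl.h>

    /* Hypothetical caller: fetch a domain's vNUMA topology. */
    int dump_vnuma(xc_interface *xch, uint32_t domid)
    {
        uint32_t nr_vnodes = 0, nr_vmemranges = 0, nr_vcpus = 0;
        xen_vmemrange_t *vmemrange = NULL;
        unsigned int *vdistance = NULL, *vcpu_to_vnode = NULL;
        int rc;

        /* Probe with zero-sized buffers; on failure the counts are
         * assumed to be written back with the sizes the hypervisor
         * requires. */
        rc = xc_domain_getvnuma(xch, domid, &nr_vnodes, &nr_vmemranges,
                                &nr_vcpus, NULL, NULL, NULL);
        if ( rc == 0 )
            return 0; /* Nothing to retrieve. */

        vmemrange = calloc(nr_vmemranges, sizeof(*vmemrange));
        vdistance = calloc(nr_vnodes * nr_vnodes, sizeof(*vdistance));
        vcpu_to_vnode = calloc(nr_vcpus, sizeof(*vcpu_to_vnode));
        if ( !vmemrange || !vdistance || !vcpu_to_vnode )
            goto out;

        /* Retry with buffers sized from the written-back counts. */
        rc = xc_domain_getvnuma(xch, domid, &nr_vnodes, &nr_vmemranges,
                                &nr_vcpus, vmemrange, vdistance,
                                vcpu_to_vnode);
     out:
        free(vmemrange);
        free(vdistance);
        free(vcpu_to_vnode);
        return rc;
    }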

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
---
 tools/libxc/include/xenctrl.h | 18 +++++++++++++++
 tools/libxc/xc_domain.c       | 54 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2000f12..37205c2 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1287,6 +1287,24 @@ int xc_domain_setvnuma(xc_interface *xch,
                         unsigned int *vdistance,
                         unsigned int *vcpu_to_vnode,
                         unsigned int *vnode_to_pnode);
+/*
+ * Retrieve vNUMA configuration.
+ * domid: IN, target domid
+ * nr_vnodes: IN/OUT, number of vnodes, must not be NULL
+ * nr_vmemranges: IN/OUT, number of vmemranges, must not be NULL
+ * nr_vcpus: IN/OUT, number of vcpus, must not be NULL
+ * vmemrange: OUT, an array of length nr_vmemranges
+ * vdistance: OUT, an array of length nr_vnodes * nr_vnodes
+ * vcpu_to_vnode: OUT, an array of length nr_vcpus
+ */
+int xc_domain_getvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t *nr_vnodes,
+                       uint32_t *nr_vmemranges,
+                       uint32_t *nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode);
 
 #if defined(__i386__) || defined(__x86_64__)
 /*
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 780797f..09ef748 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2493,6 +2493,60 @@ int xc_domain_setvnuma(xc_interface *xch,
     return rc;
 }
 
+int xc_domain_getvnuma(xc_interface *xch,
+                       uint32_t domid,
+                       uint32_t *nr_vnodes,
+                       uint32_t *nr_vmemranges,
+                       uint32_t *nr_vcpus,
+                       xen_vmemrange_t *vmemrange,
+                       unsigned int *vdistance,
+                       unsigned int *vcpu_to_vnode)
+{
+    int rc;
+    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * *nr_vmemranges,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+                             *nr_vnodes * *nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * *nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    struct xen_vnuma_topology_info vnuma_topo;
+
+    if ( xc_hypercall_bounce_pre(xch, vmemrange)      ||
+         xc_hypercall_bounce_pre(xch, vdistance)      ||
+         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) )
+    {
+        rc = -1;
+        errno = ENOMEM;
+        goto vnumaget_fail;
+    }
+
+    set_xen_guest_handle(vnuma_topo.vmemrange.h, vmemrange);
+    set_xen_guest_handle(vnuma_topo.vdistance.h, vdistance);
+    set_xen_guest_handle(vnuma_topo.vcpu_to_vnode.h, vcpu_to_vnode);
+
+    vnuma_topo.nr_vnodes = *nr_vnodes;
+    vnuma_topo.nr_vcpus = *nr_vcpus;
+    vnuma_topo.nr_vmemranges = *nr_vmemranges;
+    vnuma_topo.domid = domid;
+    vnuma_topo.pad = 0;
+
+    rc = do_memory_op(xch, XENMEM_get_vnumainfo, &vnuma_topo,
+                      sizeof(vnuma_topo));
+
+    *nr_vnodes = vnuma_topo.nr_vnodes;
+    *nr_vcpus = vnuma_topo.nr_vcpus;
+    *nr_vmemranges = vnuma_topo.nr_vmemranges;
+
+ vnumaget_fail:
+    xc_hypercall_bounce_post(xch, vmemrange);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
-- 
2.1.4
