From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Ian Jackson <Ian.Jackson@eu.citrix.com>,
Ian Campbell <Ian.Campbell@citrix.com>
Subject: [PATCH RFC 1/2] tools/libxc: Improved xc_{topology, numa}info functions.
Date: Thu, 27 Feb 2014 11:11:35 +0000 [thread overview]
Message-ID: <1393499497-9162-2-git-send-email-andrew.cooper3@citrix.com> (raw)
In-Reply-To: <1393499497-9162-1-git-send-email-andrew.cooper3@citrix.com>
These two new functions provide a substantially easier-to-use API, where libxc
itself takes care of all the appropriate bounce buffering.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Ian Campbell <Ian.Campbell@citrix.com>
CC: Ian Jackson <Ian.Jackson@eu.citrix.com>
---
tools/libxc/xc_misc.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++
tools/libxc/xenctrl.h | 49 ++++++++++++++++++++++++++++
2 files changed, 134 insertions(+)
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 3303454..4f672ce 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -195,6 +195,46 @@ int xc_topologyinfo(xc_interface *xch,
return 0;
}
+/*
+ * Bounce-buffered wrapper around XEN_SYSCTL_topologyinfo: libxc performs
+ * all hypercall buffer management on behalf of the caller.
+ *
+ * On entry, *max_cpu_index gives the caller's array size minus 1; on
+ * successful exit it is overwritten with Xen's reported value.
+ * Returns 0 on success, -1 on error (errno set by the failing operation).
+ */
+int xc_topologyinfo_bounced(xc_interface *xch,
+ uint32_t *max_cpu_index,
+ uint32_t *cpu_to_core,
+ uint32_t *cpu_to_socket,
+ uint32_t *cpu_to_node)
+{
+ int ret = -1;
+ /* Each array is expected to hold (*max_cpu_index + 1) uint32_t entries. */
+ size_t sz = sizeof(uint32_t) * (*max_cpu_index + 1);
+
+ DECLARE_HYPERCALL_BOUNCE(cpu_to_core, sz, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ DECLARE_HYPERCALL_BOUNCE(cpu_to_socket, sz, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ DECLARE_HYPERCALL_BOUNCE(cpu_to_node, sz, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ DECLARE_SYSCTL;
+
+ /* NOTE(review): the header documents that the arrays may be NULL, so this
+ * presumably relies on xc_hypercall_bounce_pre() tolerating NULL user
+ * buffers (yielding a NULL guest handle below) - confirm in xc_private.c. */
+ if ( xc_hypercall_bounce_pre(xch, cpu_to_core) ||
+ xc_hypercall_bounce_pre(xch, cpu_to_socket) ||
+ xc_hypercall_bounce_pre(xch, cpu_to_node) )
+ goto out;
+
+ sysctl.cmd = XEN_SYSCTL_topologyinfo;
+ sysctl.u.topologyinfo.max_cpu_index = *max_cpu_index;
+
+ set_xen_guest_handle(sysctl.u.topologyinfo.cpu_to_core, cpu_to_core);
+ set_xen_guest_handle(sysctl.u.topologyinfo.cpu_to_socket, cpu_to_socket);
+ set_xen_guest_handle(sysctl.u.topologyinfo.cpu_to_node, cpu_to_node);
+
+ ret = do_sysctl(xch, &sysctl);
+
+ if ( ret )
+ goto out;
+
+ /* Report Xen's view of the highest cpu index back to the caller. */
+ *max_cpu_index = sysctl.u.topologyinfo.max_cpu_index;
+
+out:
+ /* NOTE(review): posts run even for buffers whose bounce_pre failed or was
+ * never reached - assumed to be a safe no-op in that case; confirm. */
+ xc_hypercall_bounce_post(xch, cpu_to_node);
+ xc_hypercall_bounce_post(xch, cpu_to_socket);
+ xc_hypercall_bounce_post(xch, cpu_to_core);
+ return ret;
+}
+
int xc_numainfo(xc_interface *xch,
xc_numainfo_t *put_info)
{
@@ -213,6 +253,51 @@ int xc_numainfo(xc_interface *xch,
return 0;
}
+/*
+ * Bounce-buffered wrapper around XEN_SYSCTL_numainfo: libxc performs all
+ * hypercall buffer management on behalf of the caller.
+ *
+ * On entry, *max_node_index gives the caller's array sizing (see below); on
+ * successful exit it is overwritten with Xen's reported value.
+ * Returns 0 on success, -1 on error (errno set by the failing operation).
+ */
+int xc_numainfo_bounced(xc_interface *xch,
+ uint32_t *max_node_index,
+ uint64_t *node_to_memsize,
+ uint64_t *node_to_memfree,
+ uint32_t *node_to_node_distance)
+{
+ int ret = -1;
+ /* memsize/memfree: (*max_node_index + 1) uint64_t entries each. */
+ size_t mem_sz = sizeof(uint64_t) * (*max_node_index + 1);
+ /* distance matrix: (*max_node_index + 1)^2 uint32_t entries. */
+ size_t distance_sz = (sizeof(uint32_t) * (*max_node_index + 1) *
+ (*max_node_index + 1));
+
+ DECLARE_HYPERCALL_BOUNCE(node_to_memsize, mem_sz,
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ DECLARE_HYPERCALL_BOUNCE(node_to_memfree, mem_sz,
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ DECLARE_HYPERCALL_BOUNCE(node_to_node_distance, distance_sz,
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+ DECLARE_SYSCTL;
+
+ /* NOTE(review): the header documents that the arrays may be NULL, so this
+ * presumably relies on xc_hypercall_bounce_pre() tolerating NULL user
+ * buffers (yielding a NULL guest handle below) - confirm in xc_private.c. */
+ if ( xc_hypercall_bounce_pre(xch, node_to_memsize) ||
+ xc_hypercall_bounce_pre(xch, node_to_memfree) ||
+ xc_hypercall_bounce_pre(xch, node_to_node_distance) )
+ goto out;
+
+ sysctl.cmd = XEN_SYSCTL_numainfo;
+ sysctl.u.numainfo.max_node_index = *max_node_index;
+
+ set_xen_guest_handle(sysctl.u.numainfo.node_to_memsize, node_to_memsize);
+ set_xen_guest_handle(sysctl.u.numainfo.node_to_memfree, node_to_memfree);
+ set_xen_guest_handle(sysctl.u.numainfo.node_to_node_distance,
+ node_to_node_distance);
+
+ ret = do_sysctl(xch, &sysctl);
+
+ if ( ret )
+ goto out;
+
+ /* Report Xen's view of the highest node index back to the caller. */
+ *max_node_index = sysctl.u.numainfo.max_node_index;
+
+out:
+ /* NOTE(review): posts run even for buffers whose bounce_pre failed or was
+ * never reached - assumed to be a safe no-op in that case; confirm. */
+ xc_hypercall_bounce_post(xch, node_to_node_distance);
+ xc_hypercall_bounce_post(xch, node_to_memfree);
+ xc_hypercall_bounce_post(xch, node_to_memsize);
+ return ret;
+}
int xc_sched_id(xc_interface *xch,
int *sched_id)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 13f816b..50126ae 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1144,9 +1144,58 @@ typedef uint64_t xc_node_to_memfree_t;
typedef uint32_t xc_node_to_node_dist_t;
int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
+
+/* Query Xen for the cpu topology information. The caller is responsible for
+ * ensuring correct hypercall buffering. */
int xc_topologyinfo(xc_interface *xch, xc_topologyinfo_t *info);
+
+/**
+ * Query Xen for the cpu topology information. The library shall ensure
+ * correct bounce buffering is performed.
+ *
+ * The following parameters behave exactly as described in Xen's public
+ * sysctl.h. Arrays may be NULL if the information is not wanted.
+ *
+ * Each array should have (max_cpu_index + 1) elements.
+ *
+ * @param [in/out] max_cpu_index
+ * @param [out] cpu_to_core
+ * @param [out] cpu_to_socket
+ * @param [out] cpu_to_node
+ * @returns 0 on success, -1 and sets errno on error.
+ */
+int xc_topologyinfo_bounced(xc_interface *xch,
+ uint32_t *max_cpu_index,
+ uint32_t *cpu_to_core,
+ uint32_t *cpu_to_socket,
+ uint32_t *cpu_to_node);
+
+/* Query Xen for the memory NUMA information. The caller is responsible for
+ * ensuring correct hypercall buffering. */
int xc_numainfo(xc_interface *xch, xc_numainfo_t *info);
+/**
+ * Query Xen for the memory NUMA information. The library shall ensure
+ * correct bounce buffering is performed.
+ *
+ * The following parameters behave exactly as described in Xen's public
+ * sysctl.h. Arrays may be NULL if the information is not wanted.
+ *
+ * node_to_mem{size,free} should have (max_node_index + 1) elements
+ * node_to_node_distance should have (max_node_index + 1)^2 elements
+ *
+ * @param [in/out] max_node_index
+ * @param [out] node_to_memsize
+ * @param [out] node_to_memfree
+ * @param [out] node_to_node_distance
+ * @returns 0 on success, -1 and sets errno on error.
+ */
+int xc_numainfo_bounced(xc_interface *xch,
+ uint32_t *max_node_index,
+ uint64_t *node_to_memsize,
+ uint64_t *node_to_memfree,
+ uint32_t *node_to_node_distance);
+
int xc_sched_id(xc_interface *xch,
int *sched_id);
--
1.7.10.4
next prev parent reply other threads:[~2014-02-27 11:11 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-02-27 11:11 [PATCH RFC 0/2] Support for hwloc Andrew Cooper
2014-02-27 11:11 ` Andrew Cooper [this message]
2014-03-12 8:34 ` [PATCH RFC 1/2] tools/libxc: Improved xc_{topology, numa}info functions Dario Faggioli
2014-03-12 10:41 ` Andrew Cooper
2014-03-12 11:00 ` Dario Faggioli
2014-03-14 14:41 ` Ian Campbell
2014-02-27 11:11 ` [PATCH RFC 2/2] SYSCTL subop to execute cpuid on a specified pcpu Andrew Cooper
2014-02-27 11:11 ` [PATCH RFC 2/2] xen/x86: Introduce XEN_SYSCTL_cpuid hypercall Andrew Cooper
2014-02-27 11:58 ` Jan Beulich
2014-02-27 12:11 ` Andrew Cooper
2014-02-27 12:26 ` Jan Beulich
2014-02-27 15:57 ` Andrew Cooper
2014-03-14 14:45 ` Ian Campbell
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1393499497-9162-2-git-send-email-andrew.cooper3@citrix.com \
--to=andrew.cooper3@citrix.com \
--cc=Ian.Campbell@citrix.com \
--cc=Ian.Jackson@eu.citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).