From: Elena Ufimtseva <ufimtseva@gmail.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, Elena Ufimtseva <ufimtseva@gmail.com>,
stefano.stabellini@eu.citrix.com, george.dunlap@eu.citrix.com,
msw@linux.com, dario.faggioli@citrix.com, lccycc123@gmail.com,
JBeulich@suse.com
Subject: [PATCH RFC v2 2/7] xen/vNUMA: Per-domain vNUMA initialization.
Date: Fri, 13 Sep 2013 04:49:59 -0400 [thread overview]
Message-ID: <1379062199-13759-1-git-send-email-ufimtseva@gmail.com> (raw)
Per-domain vNUMA topology initialization.
A domctl hypercall is used to set the vNUMA topology
of a domU at domain build time.
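
For reference, a minimal sketch of how a toolstack could drive the new call at
domain build time; the helper name, the two-node layout and all values below
are illustrative assumptions, not part of this patch:

#include <string.h>
#include <xenctrl.h>

/* Hypothetical caller: hand a two-node vNUMA layout to Xen for domid. */
static int example_setvnodes(xc_interface *xch, uint32_t domid)
{
    uint16_t nr_vnodes = 2, nr_vcpus = 4;
    vnuma_memblk_t blks[2];                     /* per-vnode memory ranges      */
    unsigned int dist[4] = { 10, 20, 20, 10 };  /* nr_vnodes x nr_vnodes matrix */
    unsigned int cpu2node[4] = { 0, 0, 1, 1 };  /* vcpu -> vnode mapping        */
    unsigned int node2pnode[2] = { 0, 1 };      /* vnode -> pnode placement     */

    /* Fill blks[i] with each vnode's memory range here; the field layout
     * comes from the public vnuma_memblk definition introduced earlier in
     * this series. */
    memset(blks, 0, sizeof(blks));

    return xc_domain_setvnodes(xch, domid, nr_vnodes, nr_vcpus,
                               blks, dist, cpu2node, node2pnode);
}
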
Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
Changes since v1:
 * use hypercall_bounce_pre/post to initialize pointers before issuing
   the domctl hypercall;
 * change the type from int to unsigned int;
---
tools/libxc/xc_dom.h | 8 +++++++
tools/libxc/xc_domain.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++
tools/libxc/xenctrl.h | 9 ++++++++
3 files changed, 74 insertions(+)
diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 86e23ee..790f145 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -114,6 +114,14 @@ struct xc_dom_image {
     struct xc_dom_phys *phys_pages;
     int realmodearea_log;
 
+    /* vNUMA topology and memory allocation structure.
+     * Describes how to allocate Xen memory from physical
+     * NUMA nodes via the vnode_to_pnode mask. */
+    uint16_t nr_vnodes;
+    uint64_t *vmemsizes;
+    unsigned int *vnode_to_pnode;
+
     /* malloc memory pool */
     struct xc_dom_mem *memblocks;
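
(These fields are presumably consumed by later patches in the series. Purely as
an illustration, a domain builder might populate them along the lines of the
sketch below; the helper name and the malloc-based allocation are assumptions,
not something this patch defines.)

#include <stdlib.h>
#include "xc_dom.h"

/* Hypothetical helper: attach a vNUMA layout to the domain-build image. */
static int example_fill_dom_vnuma(struct xc_dom_image *dom, uint16_t nr_vnodes,
                                  const uint64_t *memsizes,
                                  const unsigned int *vnode_to_pnode)
{
    unsigned int i;

    dom->nr_vnodes = nr_vnodes;
    dom->vmemsizes = malloc(nr_vnodes * sizeof(*dom->vmemsizes));
    dom->vnode_to_pnode = malloc(nr_vnodes * sizeof(*dom->vnode_to_pnode));
    if ( dom->vmemsizes == NULL || dom->vnode_to_pnode == NULL )
        return -1;

    for ( i = 0; i < nr_vnodes; i++ )
    {
        dom->vmemsizes[i] = memsizes[i];            /* bytes per vnode         */
        dom->vnode_to_pnode[i] = vnode_to_pnode[i]; /* physical placement hint */
    }
    return 0;
}
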
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 3257e2a..8837fcc 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1629,6 +1629,63 @@ int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
     return do_domctl(xch, &domctl);
 }
 
+/* Informs Xen about the domain's vNUMA topology */
+int xc_domain_setvnodes(xc_interface *xch,
+                        uint32_t domid,
+                        uint16_t nr_vnodes,
+                        uint16_t nr_vcpus,
+                        vnuma_memblk_t *vmemblks,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_to_pnode)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) * nr_vnodes * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    /* vnode_to_pnode may be NULL on non-NUMA machines */
+    if ( vdistance == NULL || vcpu_to_vnode == NULL || vmemblks == NULL )
+    {
+        PERROR("Invalid parameters for XEN_DOMCTL_setvnumainfo");
+        return -EINVAL;
+    }
+
+    rc = -EINVAL;
+
+    if ( xc_hypercall_bounce_pre(xch, vmemblks) ||
+         xc_hypercall_bounce_pre(xch, vdistance) ||
+         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
+         xc_hypercall_bounce_pre(xch, vnode_to_pnode) )
+    {
+        PERROR("Could not bounce buffers for xc_domain_setvnodes");
+        goto out;
+    }
+
+    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
+    set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
+    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
+    set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, vmemblks);
+
+    domctl.cmd = XEN_DOMCTL_setvnumainfo;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vnuma.nr_vnodes = nr_vnodes;
+    rc = do_domctl(xch, &domctl);
+
+ out:
+    /* bounce_post is a no-op for buffers that were never bounced */
+    xc_hypercall_bounce_post(xch, vmemblks);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+    xc_hypercall_bounce_post(xch, vnode_to_pnode);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index f2cebaf..41f1233 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1083,6 +1083,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
                                uint32_t domid,
                                unsigned long map_limitkb);
 
+int xc_domain_setvnodes(xc_interface *xch,
+                        uint32_t domid,
+                        uint16_t nr_vnodes,
+                        uint16_t nr_vcpus,
+                        vnuma_memblk_t *vmemblks,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_to_pnode);
+
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * PC BIOS standard E820 types and structure.
--
1.7.10.4