* [PATCH RFC v2 2/7] xen/vNUMA: Per-domain vNUMA initialization.
@ 2013-09-13 8:49 Elena Ufimtseva
2013-09-13 13:07 ` Dario Faggioli
2013-09-16 16:24 ` George Dunlap
0 siblings, 2 replies; 3+ messages in thread
From: Elena Ufimtseva @ 2013-09-13 8:49 UTC (permalink / raw)
To: xen-devel
Cc: keir, Elena Ufimtseva, stefano.stabellini, george.dunlap, msw,
dario.faggioli, lccycc123, JBeulich
Per-domain vNUMA topology initialization.
The domctl hypercall is used to set the vNUMA topology
for each domU at domain build time.
Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
Changes since v1:
* use hypercall_bounce_pre/post to initialize pointers before
  issuing the domctl hypercall;
* change type from int to unsigned int;
---
tools/libxc/xc_dom.h | 8 +++++++
tools/libxc/xc_domain.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++
tools/libxc/xenctrl.h | 9 ++++++++
3 files changed, 74 insertions(+)
diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 86e23ee..790f145 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -114,6 +114,14 @@ struct xc_dom_image {
struct xc_dom_phys *phys_pages;
int realmodearea_log;
+ /* vNUMA topology and memory allocation structure.
+ * Defines how Xen memory is allocated from physical
+ * NUMA nodes via the vnode_to_pnode mask. */
+ uint16_t nr_vnodes;
+ uint64_t *vmemsizes;
+ unsigned int *vnode_to_pnode;
+
/* malloc memory pool */
struct xc_dom_mem *memblocks;
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 3257e2a..8837fcc 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1629,6 +1629,63 @@ int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
return do_domctl(xch, &domctl);
}
+/* Informs XEN that domain is vNUMA aware */
+int xc_domain_setvnodes(xc_interface *xch,
+ uint32_t domid,
+ uint16_t nr_vnodes,
+ uint16_t nr_vcpus,
+ vnuma_memblk_t *vmemblks,
+ unsigned int *vdistance,
+ unsigned int *vcpu_to_vnode,
+ unsigned int *vnode_to_pnode)
+{
+ int rc;
+ DECLARE_DOMCTL;
+ DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) * nr_vnodes * nr_vnodes,
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) * nr_vnodes,
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+ if ( vdistance == NULL || vcpu_to_vnode == NULL || vmemblks == NULL )
+ /* vnode_to_pnode can be null on non-NUMA machines */
+ {
+ PERROR("Parameters are wrong XEN_DOMCTL_setvnumainfo\n");
+ return -EINVAL;
+ }
+
+ rc = -EINVAL;
+
+ if (xc_hypercall_bounce_pre(xch, vmemblks) ||
+ xc_hypercall_bounce_pre(xch, vdistance) ||
+ xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
+ xc_hypercall_bounce_pre(xch, vnode_to_pnode))
+ {
+ PERROR("Could not bounce buffer for xc_domain_setvnodes");
+ return rc;
+ }
+
+ set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
+ set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
+ set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
+ set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, vmemblks);
+
+ domctl.cmd = XEN_DOMCTL_setvnumainfo;
+ domctl.domain = (domid_t)domid;
+ domctl.u.vnuma.nr_vnodes = nr_vnodes;
+ rc = do_domctl(xch, &domctl);
+
+ xc_hypercall_bounce_post(xch, vmemblks);
+ xc_hypercall_bounce_post(xch, vdistance);
+ xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+ xc_hypercall_bounce_post(xch, vnode_to_pnode);
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index f2cebaf..41f1233 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1083,6 +1083,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
uint32_t domid,
unsigned long map_limitkb);
+int xc_domain_setvnodes(xc_interface *xch,
+ uint32_t domid,
+ uint16_t nr_vnodes,
+ uint16_t nr_vcpus,
+ vnuma_memblk_t *vmemareas,
+ unsigned int *vdistance,
+ unsigned int *vcpu_to_vnode,
+ unsigned int *vnode_to_pnode);
+
#if defined(__i386__) || defined(__x86_64__)
/*
* PC BIOS standard E820 types and structure.
--
1.7.10.4
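For reference, a minimal sketch of how a toolstack caller might use the new
xc_domain_setvnodes() interface. The vnuma_memblk_t field names (start/end),
the node/vcpu counts, and all values below are assumptions for illustration
only, not part of the posted patch:

    /* Hypothetical 2-vnode, 4-vcpu guest with a 4 GiB split evenly in two. */
    vnuma_memblk_t memblks[2];
    unsigned int vdistance[4]      = { 10, 20, 20, 10 }; /* 2x2 matrix, row-major */
    unsigned int vcpu_to_vnode[4]  = { 0, 0, 1, 1 };
    unsigned int vnode_to_pnode[2] = { 0, 1 };
    uint64_t guest_mem_bytes       = 4ULL << 30;         /* assumed guest size */

    /* Assumed field names: each block covers half of guest memory. */
    memblks[0].start = 0;
    memblks[0].end   = guest_mem_bytes / 2;
    memblks[1].start = guest_mem_bytes / 2;
    memblks[1].end   = guest_mem_bytes;

    if ( xc_domain_setvnodes(xch, domid, 2, 4, memblks,
                             vdistance, vcpu_to_vnode, vnode_to_pnode) != 0 )
        PERROR("Could not set vNUMA topology for domain %u", domid);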
* Re: [PATCH RFC v2 2/7] xen/vNUMA: Per-domain vNUMA initialization.
2013-09-13 8:49 [PATCH RFC v2 2/7] xen/vNUMA: Per-domain vNUMA initialization Elena Ufimtseva
@ 2013-09-13 13:07 ` Dario Faggioli
2013-09-16 16:24 ` George Dunlap
1 sibling, 0 replies; 3+ messages in thread
From: Dario Faggioli @ 2013-09-13 13:07 UTC (permalink / raw)
To: Elena Ufimtseva
Cc: keir, stefano.stabellini, george.dunlap, msw, lccycc123,
xen-devel, JBeulich
BTW, Elena, you got Matt's address wrong in most of the patch e-mails.
This one is fine, but all the others have <sw_AT_linux.com>, instead of
msw, which then bounces! :-P
Also, the threading. As I'm sure you've seen, patches are usually sent
as replies to the cover letter. I'm quite sure that was the case in
your previous submission, but it is not this time.
Not sure what went wrong, and no big deal... just keep an eye on it,
ok? :-P
Dario
--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
* Re: [PATCH RFC v2 2/7] xen/vNUMA: Per-domain vNUMA initialization.
2013-09-13 8:49 [PATCH RFC v2 2/7] xen/vNUMA: Per-domain vNUMA initialization Elena Ufimtseva
2013-09-13 13:07 ` Dario Faggioli
@ 2013-09-16 16:24 ` George Dunlap
1 sibling, 0 replies; 3+ messages in thread
From: George Dunlap @ 2013-09-16 16:24 UTC (permalink / raw)
To: Elena Ufimtseva
Cc: Keir Fraser, Ian Campbell, Stefano Stabellini, Matt Wilson,
Dario Faggioli, Li Yechen, xen-devel@lists.xen.org, Jan Beulich,
Ian Jackson
On Fri, Sep 13, 2013 at 9:49 AM, Elena Ufimtseva <ufimtseva@gmail.com> wrote:
> Per-domain vNUMA topology initialization.
The one-line summary should have more hints about what code this
touches, so the right people notice it; for example:
vNUMA: libxc plumbing to set guest vnodes
Also, you should cc the tools maintainers, Ian Campbell and Ian Jackson.
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index 3257e2a..8837fcc 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -1629,6 +1629,63 @@ int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq)
> return do_domctl(xch, &domctl);
> }
>
> +/* Informs XEN that domain is vNUMA aware */
> +int xc_domain_setvnodes(xc_interface *xch,
> + uint32_t domid,
> + uint16_t nr_vnodes,
> + uint16_t nr_vcpus,
> + vnuma_memblk_t *vmemblks,
> + unsigned int *vdistance,
> + unsigned int *vcpu_to_vnode,
> + unsigned int *vnode_to_pnode)
> +{
> + int rc;
> + DECLARE_DOMCTL;
> + DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
> + XC_HYPERCALL_BUFFER_BOUNCE_IN);
> + DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) * nr_vnodes * nr_vnodes,
> + XC_HYPERCALL_BUFFER_BOUNCE_IN);
> + DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
> + XC_HYPERCALL_BUFFER_BOUNCE_IN);
> + DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) * nr_vnodes,
> + XC_HYPERCALL_BUFFER_BOUNCE_IN);
> +
> + if ( vdistance == NULL || vcpu_to_vnode == NULL || vmemblks == NULL )
> + /* vnode_to_pnode can be null on non-NUMA machines */
> + {
> + PERROR("Parameters are wrong XEN_DOMCTL_setvnumainfo\n");
> + return -EINVAL;
> + }
> +
> + rc = -EINVAL;
You might as well be consistent here and set this above, and then do
"return rc" rather than "return -EINVAL".
-George