* [PATCH 2/7] libxc: Plumb Xen with vNUMA topology for domain.
From: Elena Ufimtseva @ 2013-10-16 22:40 UTC
To: xen-devel
Cc: keir, Elena Ufimtseva, stefano.stabellini, george.dunlap, msw,
dario.faggioli, lccycc123, JBeulich
Per-domain vNUMA topology initialization.
The XEN_DOMCTL_setvnumainfo domctl hypercall is used to set the vNUMA
topology of a domU at domain build time.
Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
Changes since RFC v2:
- copy vNUMA topology information in hypercall in one go;
---
tools/libxc/xc_dom.h | 9 ++++++++
tools/libxc/xc_domain.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++
tools/libxc/xenctrl.h | 9 ++++++++
3 files changed, 77 insertions(+)
diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 86e23ee..a271b7c 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -114,6 +114,15 @@ struct xc_dom_image {
struct xc_dom_phys *phys_pages;
int realmodearea_log;
+ /*
+ * vNUMA topology and memory allocation information.
+ * Describes how memory is allocated on the physical
+ * NUMA nodes given by vnode_to_pnode.
+ */
+ uint16_t nr_vnodes;
+ uint64_t *vnuma_memszs;
+ unsigned int *vnode_to_pnode;
+
/* malloc memory pool */
struct xc_dom_mem *memblocks;
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 2cea6e3..6cab681 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1777,6 +1777,65 @@ int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
return do_domctl(xch, &domctl);
}
+/* Plumbs Xen with vNUMA topology */
+int xc_domain_setvnodes(xc_interface *xch,
+ uint32_t domid,
+ uint16_t nr_vnodes,
+ uint16_t nr_vcpus,
+ vnuma_memblk_t *vmemblks,
+ unsigned int *vdistance,
+ unsigned int *vcpu_to_vnode,
+ unsigned int *vnode_to_pnode)
+{
+ int rc;
+ DECLARE_DOMCTL;
+ DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
+ XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+ DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+ nr_vnodes * nr_vnodes,
+ XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+ DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
+ XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+ DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) *
+ nr_vnodes,
+ XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+ if ( vdistance == NULL || vcpu_to_vnode == NULL ||
+ vmemblks == NULL || vnode_to_pnode == NULL )
+ {
+ PERROR("Incorrect parameters for XEN_DOMCTL_setvnumainfo\n");
+ return -EINVAL;
+ }
+
+ rc = -EINVAL;
+
+ if (xc_hypercall_bounce_pre(xch, vmemblks) ||
+ xc_hypercall_bounce_pre(xch, vdistance) ||
+ xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
+ xc_hypercall_bounce_pre(xch, vnode_to_pnode))
+ {
+ PERROR("Could not bounce buffer for xc_domain_setvnodes");
+ return rc;
+ }
+
+ set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, vmemblks);
+ set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
+ set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
+ set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
+
+ domctl.cmd = XEN_DOMCTL_setvnumainfo;
+ domctl.domain = (domid_t)domid;
+ domctl.u.vnuma.nr_vnodes = nr_vnodes;
+ rc = do_domctl(xch, &domctl);
+
+ xc_hypercall_bounce_post(xch, vmemblks);
+ xc_hypercall_bounce_post(xch, vdistance);
+ xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+ xc_hypercall_bounce_post(xch, vnode_to_pnode);
+
+ return rc;
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 8cf3f3b..3dbd035 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1108,6 +1108,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
uint32_t domid,
unsigned long map_limitkb);
+int xc_domain_setvnodes(xc_interface *xch,
+ uint32_t domid,
+ uint16_t nr_vnodes,
+ uint16_t nr_vcpus,
+ vnuma_memblk_t *vmemareas,
+ unsigned int *vdistance,
+ unsigned int *vcpu_to_vnode,
+ unsigned int *vnode_to_pnode);
+
#if defined(__i386__) || defined(__x86_64__)
/*
* PC BIOS standard E820 types and structure.
--
1.7.10.4
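For illustration only (this snippet is not part of the series), a caller on the
toolstack side might drive the new xc_domain_setvnodes() interface roughly as
follows, assuming the vnuma_memblk_t type with start/end fields introduced
earlier in the series, and filling the arrays by hand rather than from a real
guest configuration:

    /*
     * Hypothetical sketch: two 512MB vnodes, four vcpus, everything
     * placed on physical node 0.  Not part of this patch.
     */
    static int set_example_vnuma(xc_interface *xch, uint32_t domid)
    {
        uint16_t nr_vnodes = 2, nr_vcpus = 4;
        vnuma_memblk_t memblks[2];
        unsigned int vdistance[4];      /* nr_vnodes * nr_vnodes entries */
        unsigned int vcpu_to_vnode[4];
        unsigned int vnode_to_pnode[2];
        unsigned int i, j;

        /* Two vnodes laid out back to back in guest physical space. */
        memblks[0].start = 0;
        memblks[0].end   = 512UL << 20;
        memblks[1].start = memblks[0].end;
        memblks[1].end   = memblks[1].start + (512UL << 20);

        /* Flat distance matrix: 10 on the diagonal, 20 elsewhere. */
        for ( i = 0; i < nr_vnodes; i++ )
            for ( j = 0; j < nr_vnodes; j++ )
                vdistance[i * nr_vnodes + j] = (i == j) ? 10 : 20;

        /* Spread vcpus round-robin and map both vnodes to pnode 0. */
        for ( i = 0; i < nr_vcpus; i++ )
            vcpu_to_vnode[i] = i % nr_vnodes;
        for ( i = 0; i < nr_vnodes; i++ )
            vnode_to_pnode[i] = 0;

        return xc_domain_setvnodes(xch, domid, nr_vnodes, nr_vcpus,
                                   memblks, vdistance, vcpu_to_vnode,
                                   vnode_to_pnode);
    }

On failure the return value of the domctl is propagated, so a caller can log
it and fall back to building the domain without a vNUMA topology.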
* Re: [PATCH 2/7] libxc: Plumb Xen with vNUMA topology for domain.
From: George Dunlap @ 2013-11-14 22:56 UTC
To: Elena Ufimtseva
Cc: keir, stefano.stabellini, msw, dario.faggioli, lccycc123,
xen-devel, JBeulich
On 10/16/2013 11:40 PM, Elena Ufimtseva wrote:
> Per-domain vNUMA topology initialization.
> The XEN_DOMCTL_setvnumainfo domctl hypercall is used to set the vNUMA
> topology of a domU at domain build time.
>
> Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
>
> ---
> Changes since RFC v2:
> - copy vNUMA topology information in hypercall in one go;
> ---
> tools/libxc/xc_dom.h | 9 ++++++++
> tools/libxc/xc_domain.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++
> tools/libxc/xenctrl.h | 9 ++++++++
> 3 files changed, 77 insertions(+)
>
> diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
> index 86e23ee..a271b7c 100644
> --- a/tools/libxc/xc_dom.h
> +++ b/tools/libxc/xc_dom.h
> @@ -114,6 +114,15 @@ struct xc_dom_image {
> struct xc_dom_phys *phys_pages;
> int realmodearea_log;
>
> + /*
> + * vNUMA topology and memory allocation information.
> + * Describes how memory is allocated on the physical
> + * NUMA nodes given by vnode_to_pnode.
> + */
> + uint16_t nr_vnodes;
> + uint64_t *vnuma_memszs;
> + unsigned int *vnode_to_pnode;
This isn't really used in this patch -- I think this should probably be
put in the next patch.
> +
> /* malloc memory pool */
> struct xc_dom_mem *memblocks;
>
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index 2cea6e3..6cab681 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -1777,6 +1777,65 @@ int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
> return do_domctl(xch, &domctl);
> }
>
> +/* Plumbs Xen with vNUMA topology */
> +int xc_domain_setvnodes(xc_interface *xch,
> + uint32_t domid,
> + uint16_t nr_vnodes,
> + uint16_t nr_vcpus,
> + vnuma_memblk_t *vmemblks,
> + unsigned int *vdistance,
> + unsigned int *vcpu_to_vnode,
> + unsigned int *vnode_to_pnode)
> +{
> + int rc;
> + DECLARE_DOMCTL;
> + DECLARE_HYPERCALL_BOUNCE(vmemblks, sizeof(*vmemblks) * nr_vnodes,
> + XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> + DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
> + nr_vnodes * nr_vnodes,
> + XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> + DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
> + XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> + DECLARE_HYPERCALL_BOUNCE(vnode_to_pnode, sizeof(*vnode_to_pnode) *
> + nr_vnodes,
> + XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> +
> + if ( vdistance == NULL || vcpu_to_vnode == NULL ||
> + vmemblks == NULL || vnode_to_pnode == NULL )
> + {
> + PERROR("Incorrect parameters for XEN_DOMCTL_setvnumainfo\n");
> + return -EINVAL;
> + }
> +
> + rc = -EINVAL;
> +
> + if (xc_hypercall_bounce_pre(xch, vmemblks) ||
> + xc_hypercall_bounce_pre(xch, vdistance) ||
> + xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
> + xc_hypercall_bounce_pre(xch, vnode_to_pnode))
> + {
> + PERROR("Could not bounce buffer for xc_domain_setvnodes");
> + return rc;
As Dario said, this should be setting errno and returning -1.
(A bit of a weird convention, but it's as old as Unix, and not worth
breaking at this point.)
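Roughly (untested), I mean something like:

    if (xc_hypercall_bounce_pre(xch, vmemblks) ||
        xc_hypercall_bounce_pre(xch, vdistance) ||
        xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
        xc_hypercall_bounce_pre(xch, vnode_to_pnode))
    {
        PERROR("Could not bounce buffer for xc_domain_setvnodes");
        errno = EINVAL;
        return -1;
    }

and the same for the NULL-parameter check earlier in the function, rather
than returning -EINVAL directly.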
Other than that it looks fine to me; but I'm not up on the details of
bounce buffers and what not enough to give a proper reviewed-by.
-George
> + }
> +
> + set_xen_guest_handle(domctl.u.vnuma.vnuma_memblks, vmemblks);
> + set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
> + set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
> + set_xen_guest_handle(domctl.u.vnuma.vnode_to_pnode, vnode_to_pnode);
> +
> + domctl.cmd = XEN_DOMCTL_setvnumainfo;
> + domctl.domain = (domid_t)domid;
> + domctl.u.vnuma.nr_vnodes = nr_vnodes;
> + rc = do_domctl(xch, &domctl);
> +
> + xc_hypercall_bounce_post(xch, vmemblks);
> + xc_hypercall_bounce_post(xch, vdistance);
> + xc_hypercall_bounce_post(xch, vcpu_to_vnode);
> + xc_hypercall_bounce_post(xch, vnode_to_pnode);
> +
> + return rc;
> +}
> +
> /*
> * Local variables:
> * mode: C
> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> index 8cf3f3b..3dbd035 100644
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -1108,6 +1108,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
> uint32_t domid,
> unsigned long map_limitkb);
>
> +int xc_domain_setvnodes(xc_interface *xch,
> + uint32_t domid,
> + uint16_t nr_vnodes,
> + uint16_t nr_vcpus,
> + vnuma_memblk_t *vmemareas,
> + unsigned int *vdistance,
> + unsigned int *vcpu_to_vnode,
> + unsigned int *vnode_to_pnode);
> +
> #if defined(__i386__) || defined(__x86_64__)
> /*
> * PC BIOS standard E820 types and structure.
>