From: Dario Faggioli <dario.faggioli@citrix.com>
To: Elena Ufimtseva <ufimtseva@gmail.com>
Cc: keir@xen.org, Ian.Campbell@citrix.com,
stefano.stabellini@eu.citrix.com, george.dunlap@eu.citrix.com,
msw@linux.com, lccycc123@gmail.com, ian.jackson@eu.citrix.com,
xen-devel@lists.xen.org, JBeulich@suse.com
Subject: Re: [PATCH v6 01/10] xen: vnuma topology and subop hypercalls
Date: Tue, 22 Jul 2014 17:14:06 +0200
Message-ID: <1406042046.17850.76.camel@Solace>
In-Reply-To: <1405662609-31486-2-git-send-email-ufimtseva@gmail.com>
On Fri, 2014-07-18 at 01:50 -0400, Elena Ufimtseva wrote:
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index cd64aea..895584a 100644
> @@ -297,6 +297,144 @@ int vcpuaffinity_params_invalid(const xen_domctl_vcpuaffinity_t *vcpuaff)
> guest_handle_is_null(vcpuaff->cpumap_soft.bitmap));
> }
>
> +/*
> + * Allocates memory for vNUMA, **vnuma should be NULL.
> + * Caller has to make sure that domain has max_pages
> + * and number of vcpus set for domain.
> + * Verifies that single allocation does not exceed
> + * PAGE_SIZE.
> + */
> +static int vnuma_alloc(struct vnuma_info **vnuma,
> + unsigned int nr_vnodes,
> + unsigned int nr_vcpus,
> + unsigned int dist_size)
> +{
> + struct vnuma_info *v;
> +
> + if ( vnuma && *vnuma )
> + return -EINVAL;
> +
> + v = *vnuma;
>
Do you need this? What for?
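I mean, the check right above guarantees that *vnuma is NULL, so the only
thing this assignment can put in 'v' is NULL, i.e., it is dead code. A
minimal sketch of what I'd expect at the top of the function:

    struct vnuma_info *v;

    if ( vnuma && *vnuma )
        return -EINVAL;

    /* 'v' is only ever assigned from xzalloc() below; no need to read *vnuma. */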
> + /*
> + * check if any of xmallocs exeeds PAGE_SIZE.
> + * If yes, consider it as an error for now.
>
Do you mind elaborating a bit more on the 'for now'? Why only 'for now',
and what's the plan for the future?
> + */
> + if ( nr_vnodes > PAGE_SIZE / sizeof(nr_vnodes) ||
> + nr_vcpus > PAGE_SIZE / sizeof(nr_vcpus) ||
> + nr_vnodes > PAGE_SIZE / sizeof(struct vmemrange) ||
> + dist_size > PAGE_SIZE / sizeof(dist_size) )
> + return -EINVAL;
> +
> + v = xzalloc(struct vnuma_info);
> + if ( !v )
> + return -ENOMEM;
> +
> + v->vdistance = xmalloc_array(unsigned int, dist_size);
> + v->vmemrange = xmalloc_array(vmemrange_t, nr_vnodes);
> + v->vcpu_to_vnode = xmalloc_array(unsigned int, nr_vcpus);
> + v->vnode_to_pnode = xmalloc_array(unsigned int, nr_vnodes);
> +
> + if ( v->vdistance == NULL || v->vmemrange == NULL ||
> + v->vcpu_to_vnode == NULL || v->vnode_to_pnode == NULL )
> + {
> + vnuma_destroy(v);
> + return -ENOMEM;
> + }
> +
> + *vnuma = v;
> +
> + return 0;
> +}
> +
> +/*
> + * Allocate memory and construct one vNUMA node,
> + * set default parameters, assign all memory and
> + * vcpus to this node, set distance to 10.
> + */
> +static long vnuma_fallback(const struct domain *d,
> + struct vnuma_info **vnuma)
> +{
> +
I think I agree with Wei that this fallback is not necessary.
> +/*
> + * construct vNUMA topology form u_vnuma struct and return
> + * it in dst.
> + */
> +long vnuma_init(const struct xen_domctl_vnuma *u_vnuma,
> + const struct domain *d,
> + struct vnuma_info **dst)
> +{
> + unsigned int dist_size, nr_vnodes = 0;
> + long ret;
> + struct vnuma_info *v = NULL;
> +
> + ret = -EINVAL;
> +
Why not initialize 'ret' while defining it?
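I.e., just something like this (sketch):

    long ret = -EINVAL;
    struct vnuma_info *v = NULL;

and then drop the standalone 'ret = -EINVAL;' below.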
> + /* If vNUMA topology already set, just exit. */
> + if ( !u_vnuma || *dst )
> + return ret;
> +
> + nr_vnodes = u_vnuma->nr_vnodes;
> +
> + if ( nr_vnodes == 0 )
> + return ret;
> +
> + if ( nr_vnodes > (UINT_MAX / nr_vnodes) )
> + return ret;
> +
Mmmm, do we perhaps want to #define a maximum number of supported virtual
nodes, put it somewhere in a header, and use it for the check? I mean
something like what we have for the host (in that case, it's called
MAX_NUMNODES).
After all, UINT_MAX is 2^32-1, so this overflow check alone still allows
up to ~2^16 virtual nodes; does it really make sense to allow a guest
that big?
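Something along these lines is what I have in mind (just a sketch; the
name and the value are made up by me, nothing like this exists yet):

    /* Hypothetical cap on the number of virtual NUMA nodes of a guest. */
    #define XEN_MAX_VNODES 64

    ...

    if ( nr_vnodes == 0 || nr_vnodes > XEN_MAX_VNODES )
        return ret;

With a sane cap like that, the 'nr_vnodes > (UINT_MAX / nr_vnodes)'
overflow check would not even be needed any longer.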
> + dist_size = nr_vnodes * nr_vnodes;
> +
> + ret = vnuma_alloc(&v, nr_vnodes, d->max_vcpus, dist_size);
> + if ( ret )
> + return ret;
> +
> + /* On failure, set only one vNUMA node and its success. */
> + ret = 0;
> +
> + if ( copy_from_guest(v->vdistance, u_vnuma->vdistance, dist_size) )
> + goto vnuma_onenode;
> + if ( copy_from_guest(v->vmemrange, u_vnuma->vmemrange, nr_vnodes) )
> + goto vnuma_onenode;
> + if ( copy_from_guest(v->vcpu_to_vnode, u_vnuma->vcpu_to_vnode,
> + d->max_vcpus) )
> + goto vnuma_onenode;
> + if ( copy_from_guest(v->vnode_to_pnode, u_vnuma->vnode_to_pnode,
> + nr_vnodes) )
> + goto vnuma_onenode;
> +
> + v->nr_vnodes = nr_vnodes;
> + *dst = v;
> +
> + return ret;
> +
> +vnuma_onenode:
> + vnuma_destroy(v);
> + return vnuma_fallback(d, dst);
>
As said, just report the error and bail in this case.
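I.e., something like this (just a sketch, assuming vnuma_fallback() goes
away entirely):

    if ( copy_from_guest(v->vdistance, u_vnuma->vdistance, dist_size) ||
         copy_from_guest(v->vmemrange, u_vnuma->vmemrange, nr_vnodes) ||
         copy_from_guest(v->vcpu_to_vnode, u_vnuma->vcpu_to_vnode,
                         d->max_vcpus) ||
         copy_from_guest(v->vnode_to_pnode, u_vnuma->vnode_to_pnode,
                         nr_vnodes) )
    {
        /* Report the failure instead of faking a one-node topology. */
        vnuma_destroy(v);
        return -EFAULT;
    }

    v->nr_vnodes = nr_vnodes;
    *dst = v;

    return 0;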
> +}
> +
> long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> long ret = 0;
> @@ -967,6 +1105,35 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> }
> break;
>
> + case XEN_DOMCTL_setvnumainfo:
> + {
> + struct vnuma_info *v = NULL;
> +
> + ret = -EFAULT;
> + if ( guest_handle_is_null(op->u.vnuma.vdistance) ||
> + guest_handle_is_null(op->u.vnuma.vmemrange) ||
> + guest_handle_is_null(op->u.vnuma.vcpu_to_vnode) ||
> + guest_handle_is_null(op->u.vnuma.vnode_to_pnode) )
> + return ret;
> +
> + ret = -EINVAL;
> +
> + ret = vnuma_init(&op->u.vnuma, d, &v);
>
Rather pointless 'ret=-EINVAL', I would say. :-)
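I.e. (sketch):

    ret = vnuma_init(&op->u.vnuma, d, &v);
    if ( ret < 0 || v == NULL )
        break;

vnuma_init() already returns a proper error code, so there is no point in
loading ret with -EINVAL just to overwrite it on the very next line.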
> + if ( ret < 0 || v == NULL )
> + break;
> +
> + /* overwrite vnuma for domain */
> + if ( !d->vnuma )
> + vnuma_destroy(d->vnuma);
> +
> + domain_lock(d);
> + d->vnuma = v;
> + domain_unlock(d);
> +
> + ret = 0;
> + }
> + break;
> +
> default:
> ret = arch_do_domctl(op, d, u_domctl);
> break;
Regards,
Dario
--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)