From: George Dunlap <george.dunlap@eu.citrix.com>
To: Dario Faggioli <raistlin@linux.it>
Cc: Andre Przywara <andre.przywara@amd.com>,
Ian Campbell <Ian.Campbell@citrix.com>,
Stefano Stabellini <Stefano.Stabellini@eu.citrix.com>,
Juergen Gross <juergen.gross@ts.fujitsu.com>,
Ian Jackson <Ian.Jackson@eu.citrix.com>,
"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>,
Jan Beulich <JBeulich@suse.com>
Subject: Re: [PATCH 01 of 10 [RFC]] libxc: Generalize xenctl_cpumap to just xenctl_map
Date: Wed, 11 Apr 2012 17:08:12 +0100 [thread overview]
Message-ID: <4F85AC6C.10407@eu.citrix.com> (raw)
In-Reply-To: <e63c137d9fc551ca941a.1334150268@Solace>
On 11/04/12 14:17, Dario Faggioli wrote:
> In preparation for adding an xc_nodemap_t and its related
> handling logic. Just add one indirection layer, and try to
> retain the interface as much as possible (although some
> small bits here and there have been affected).
This patch is fine with me on the whole (one comment below), but in this
kind of a patch I think you need to include exactly what it is the patch
does. I.e.:
1. Replace xenctl_cpumap with xenctl_map
2. Implement bitmap_to_xenctl_map and the reverse
3. Re-implement cpumask_to_xenctl_map with bitmap_to_xenctl_map and vice
versa.
4. Other than #3, no functional changes.
>
> Signed-off-by: Dario Faggioli<dario.faggioli@citrix.eu.com>
>
> diff --git a/tools/libxc/xc_cpupool.c b/tools/libxc/xc_cpupool.c
> --- a/tools/libxc/xc_cpupool.c
> +++ b/tools/libxc/xc_cpupool.c
> @@ -90,7 +90,7 @@ xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_
> sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
> sysctl.u.cpupool_op.cpupool_id = poolid;
> set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
> - sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
> + sysctl.u.cpupool_op.cpumap.nr_elems = local_size * 8;
>
> err = do_sysctl_save(xch,&sysctl);
>
> @@ -184,7 +184,7 @@ xc_cpumap_t xc_cpupool_freeinfo(xc_inter
> sysctl.cmd = XEN_SYSCTL_cpupool_op;
> sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
> set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
> - sysctl.u.cpupool_op.cpumap.nr_cpus = mapsize * 8;
> + sysctl.u.cpupool_op.cpumap.nr_elems = mapsize * 8;
>
> err = do_sysctl_save(xch,&sysctl);
>
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -142,7 +142,7 @@ int xc_vcpu_setaffinity(xc_interface *xc
>
> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
>
> - domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
> + domctl.u.vcpuaffinity.cpumap.nr_elems = cpusize * 8;
>
> ret = do_domctl(xch,&domctl);
>
> @@ -182,7 +182,7 @@ int xc_vcpu_getaffinity(xc_interface *xc
> domctl.u.vcpuaffinity.vcpu = vcpu;
>
> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
> - domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
> + domctl.u.vcpuaffinity.cpumap.nr_elems = cpusize * 8;
>
> ret = do_domctl(xch,&domctl);
>
> diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c
> --- a/tools/libxc/xc_tbuf.c
> +++ b/tools/libxc/xc_tbuf.c
> @@ -134,7 +134,7 @@ int xc_tbuf_set_cpu_mask(xc_interface *x
> bitmap_64_to_byte(bytemap,&mask64, sizeof (mask64) * 8);
>
> set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
> - sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8;
> + sysctl.u.tbuf_op.cpu_mask.nr_elems = sizeof(bytemap) * 8;
>
> ret = do_sysctl(xch,&sysctl);
>
> diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
> --- a/xen/arch/x86/platform_hypercall.c
> +++ b/xen/arch/x86/platform_hypercall.c
> @@ -365,7 +365,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
> {
> uint32_t cpu;
> uint64_t idletime, now = NOW();
> - struct xenctl_cpumap ctlmap;
> + struct xenctl_map ctlmap;
> cpumask_var_t cpumap;
> XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
> XEN_GUEST_HANDLE(uint64) idletimes;
> @@ -378,7 +378,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
> if ( cpufreq_controller != FREQCTL_dom0_kernel )
> break;
>
> - ctlmap.nr_cpus = op->u.getidletime.cpumap_nr_cpus;
> + ctlmap.nr_elems = op->u.getidletime.cpumap_nr_cpus;
> guest_from_compat_handle(cpumap_bitmap,
> op->u.getidletime.cpumap_bitmap);
> ctlmap.bitmap.p = cpumap_bitmap.p; /* handle -> handle_64 conversion */
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -31,28 +31,29 @@
> static DEFINE_SPINLOCK(domctl_lock);
> DEFINE_SPINLOCK(vcpu_alloc_lock);
>
> -int cpumask_to_xenctl_cpumap(
> - struct xenctl_cpumap *xenctl_cpumap, const cpumask_t *cpumask)
> +int bitmap_to_xenctl_map(struct xenctl_map *xenctl_map,
> + const unsigned long *bitmap,
> + unsigned int nbits)
> {
> unsigned int guest_bytes, copy_bytes, i;
> uint8_t zero = 0;
> int err = 0;
> - uint8_t *bytemap = xmalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
> + uint8_t *bytemap = xmalloc_array(uint8_t, (nbits + 7) / 8);
>
> if ( !bytemap )
> return -ENOMEM;
>
> - guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
> - copy_bytes = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
> + guest_bytes = (xenctl_map->nr_elems + 7) / 8;
> + copy_bytes = min_t(unsigned int, guest_bytes, (nbits + 7) / 8);
>
> - bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);
> + bitmap_long_to_byte(bytemap, bitmap, nbits);
>
> if ( copy_bytes != 0 )
> - if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
> + if ( copy_to_guest(xenctl_map->bitmap, bytemap, copy_bytes) )
> err = -EFAULT;
>
> for ( i = copy_bytes; !err&& i< guest_bytes; i++ )
> - if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i,&zero, 1) )
> + if ( copy_to_guest_offset(xenctl_map->bitmap, i,&zero, 1) )
> err = -EFAULT;
>
> xfree(bytemap);
> @@ -60,36 +61,58 @@ int cpumask_to_xenctl_cpumap(
> return err;
> }
>
> -int xenctl_cpumap_to_cpumask(
> - cpumask_var_t *cpumask, const struct xenctl_cpumap *xenctl_cpumap)
> +int xenctl_map_to_bitmap(unsigned long *bitmap,
> + const struct xenctl_map *xenctl_map,
> + unsigned int nbits)
> {
> unsigned int guest_bytes, copy_bytes;
> int err = 0;
> - uint8_t *bytemap = xzalloc_array(uint8_t, (nr_cpu_ids + 7) / 8);
> + uint8_t *bytemap = xzalloc_array(uint8_t, (nbits + 7) / 8);
>
> if ( !bytemap )
> return -ENOMEM;
>
> - guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
> - copy_bytes = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
> + guest_bytes = (xenctl_map->nr_elems + 7) / 8;
> + copy_bytes = min_t(unsigned int, guest_bytes, (nbits + 7) / 8);
>
> if ( copy_bytes != 0 )
> {
> - if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
> + if ( copy_from_guest(bytemap, xenctl_map->bitmap, copy_bytes) )
> err = -EFAULT;
> - if ( (xenctl_cpumap->nr_cpus& 7)&& (guest_bytes<= sizeof(bytemap)) )
> - bytemap[guest_bytes-1]&= ~(0xff<< (xenctl_cpumap->nr_cpus& 7));
> + if ( (xenctl_map->nr_elems& 7)&& (guest_bytes<= sizeof(bytemap)) )
> + bytemap[guest_bytes-1]&= ~(0xff<< (xenctl_map->nr_elems& 7));
> }
>
> - if ( err )
> - /* nothing */;
> - else if ( alloc_cpumask_var(cpumask) )
> - bitmap_byte_to_long(cpumask_bits(*cpumask), bytemap, nr_cpu_ids);
> + if ( !err )
> + bitmap_byte_to_long(bitmap, bytemap, nbits);
> +
> + xfree(bytemap);
> +
> + return err;
> +}
> +
> +int cpumask_to_xenctl_cpumap(struct xenctl_map *xenctl_cpumap,
> + const cpumask_t *cpumask)
> +{
> + return bitmap_to_xenctl_map(xenctl_cpumap, cpumask_bits(cpumask),
> + nr_cpu_ids);
> +}
> +
> +int xenctl_cpumap_to_cpumask(cpumask_var_t *cpumask,
> + const struct xenctl_map *xenctl_cpumap)
> +{
> + int err = 0;
> +
> + if ( alloc_cpumask_var(cpumask) ) {
> + err = xenctl_map_to_bitmap(cpumask_bits(*cpumask), xenctl_cpumap,
> + nr_cpu_ids);
> + /* In case of error, cleanup is up to us, as the caller won't care! */
> + if ( err )
> + free_cpumask_var(*cpumask);
> + }
> else
> err = -ENOMEM;
>
> - xfree(bytemap);
> -
> return err;
> }
>
> diff --git a/xen/include/public/arch-x86/xen-mca.h b/xen/include/public/arch-x86/xen-mca.h
> --- a/xen/include/public/arch-x86/xen-mca.h
> +++ b/xen/include/public/arch-x86/xen-mca.h
> @@ -414,7 +414,7 @@ struct xen_mc_mceinject {
>
> struct xen_mc_inject_v2 {
> uint32_t flags;
> - struct xenctl_cpumap cpumap;
> + struct xenctl_map cpumap;
> };
> #endif
>
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -283,7 +283,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvc
> /* XEN_DOMCTL_getvcpuaffinity */
> struct xen_domctl_vcpuaffinity {
> uint32_t vcpu; /* IN */
> - struct xenctl_cpumap cpumap; /* IN/OUT */
> + struct xenctl_map cpumap; /* IN/OUT */
> };
> typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
> DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -71,8 +71,8 @@ struct xen_sysctl_tbuf_op {
> #define XEN_SYSCTL_TBUFOP_disable 5
> uint32_t cmd;
> /* IN/OUT variables */
> - struct xenctl_cpumap cpu_mask;
> - uint32_t evt_mask;
> + struct xenctl_map cpu_mask;
> + uint32_t evt_mask;
> /* OUT variables */
> uint64_aligned_t buffer_mfn;
> uint32_t size; /* Also an IN variable! */
> @@ -531,7 +531,7 @@ struct xen_sysctl_cpupool_op {
> uint32_t domid; /* IN: M */
> uint32_t cpu; /* IN: AR */
> uint32_t n_dom; /* OUT: I */
> - struct xenctl_cpumap cpumap; /* OUT: IF */
> + struct xenctl_map cpumap; /* OUT: IF */
> };
> typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
> diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
> --- a/xen/include/public/xen.h
> +++ b/xen/include/public/xen.h
> @@ -822,9 +822,9 @@ typedef uint8_t xen_domain_handle_t[16];
> #endif
>
> #ifndef __ASSEMBLY__
> -struct xenctl_cpumap {
> +struct xenctl_map {
> XEN_GUEST_HANDLE_64(uint8) bitmap;
> - uint32_t nr_cpus;
> + uint32_t nr_elems;
> };
> #endif
>
> diff --git a/xen/include/xen/cpumask.h b/xen/include/xen/cpumask.h
> --- a/xen/include/xen/cpumask.h
> +++ b/xen/include/xen/cpumask.h
> @@ -424,8 +424,8 @@ extern cpumask_t cpu_present_map;
> #define for_each_present_cpu(cpu) for_each_cpu(cpu,&cpu_present_map)
>
> /* Copy to/from cpumap provided by control tools. */
> -struct xenctl_cpumap;
> -int cpumask_to_xenctl_cpumap(struct xenctl_cpumap *, const cpumask_t *);
> -int xenctl_cpumap_to_cpumask(cpumask_var_t *, const struct xenctl_cpumap *);
> +struct xenctl_map;
> +int cpumask_to_xenctl_cpumap(struct xenctl_map *, const cpumask_t *);
> +int xenctl_cpumap_to_cpumask(cpumask_var_t *, const struct xenctl_map *);
You should probably s/cpumap/map/; in the function names as well.
>
> #endif /* __XEN_CPUMASK_H */
> diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -2,7 +2,7 @@
> # ! - needs translation
> # ? - needs checking
> ? dom0_vga_console_info xen.h
> -? xenctl_cpumap xen.h
> +? xenctl_map xen.h
> ? mmu_update xen.h
> ! mmuext_op xen.h
> ! start_info xen.h
next prev parent reply other threads:[~2012-04-11 16:08 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-04-11 13:17 [PATCH 00 of 10 [RFC]] Automatically place guest on host's NUMA nodes with xl Dario Faggioli
2012-04-11 13:17 ` [PATCH 01 of 10 [RFC]] libxc: Generalize xenctl_cpumap to just xenctl_map Dario Faggioli
2012-04-11 16:08 ` George Dunlap [this message]
2012-04-11 16:31 ` Dario Faggioli
2012-04-11 16:41 ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 02 of 10 [RFC]] libxl: Generalize libxl_cpumap to just libxl_map Dario Faggioli
2012-04-11 13:17 ` [PATCH 03 of 10 [RFC]] libxc, libxl: Introduce xc_nodemap_t and libxl_nodemap Dario Faggioli
2012-04-11 16:38 ` George Dunlap
2012-04-11 16:57 ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 04 of 10 [RFC]] libxl: Introduce libxl_get_numainfo() calling xc_numainfo() Dario Faggioli
2012-04-11 13:17 ` [PATCH 05 of 10 [RFC]] xl: Explicit node affinity specification for guests via config file Dario Faggioli
2012-04-12 10:24 ` George Dunlap
2012-04-12 10:48 ` David Vrabel
2012-04-12 22:25 ` Dario Faggioli
2012-04-12 11:32 ` Formatting of emails which are comments on patches Ian Jackson
2012-04-12 11:42 ` George Dunlap
2012-04-12 22:21 ` [PATCH 05 of 10 [RFC]] xl: Explicit node affinity specification for guests via config file Dario Faggioli
2012-04-11 13:17 ` [PATCH 06 of 10 [RFC]] xl: Allow user to set or change node affinity on-line Dario Faggioli
2012-04-12 10:29 ` George Dunlap
2012-04-12 21:57 ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 07 of 10 [RFC]] sched_credit: Let the scheduler know about `node affinity` Dario Faggioli
2012-04-12 23:06 ` Dario Faggioli
2012-04-27 14:45 ` George Dunlap
2012-05-02 15:13 ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 08 of 10 [RFC]] xl: Introduce First Fit memory-wise placement of guests on nodes Dario Faggioli
2012-05-01 15:45 ` George Dunlap
2012-05-02 16:30 ` Dario Faggioli
2012-05-03 1:03 ` Dario Faggioli
2012-05-03 8:10 ` Ian Campbell
2012-05-03 10:16 ` George Dunlap
2012-05-03 13:41 ` George Dunlap
2012-05-03 14:58 ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 09 of 10 [RFC]] xl: Introduce Best and Worst Fit guest placement algorithms Dario Faggioli
2012-04-16 10:29 ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 10 of 10 [RFC]] xl: Some automatic NUMA placement documentation Dario Faggioli
2012-04-12 9:11 ` Ian Campbell
2012-04-12 10:32 ` Dario Faggioli
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4F85AC6C.10407@eu.citrix.com \
--to=george.dunlap@eu.citrix.com \
--cc=Ian.Campbell@citrix.com \
--cc=Ian.Jackson@eu.citrix.com \
--cc=JBeulich@suse.com \
--cc=Stefano.Stabellini@eu.citrix.com \
--cc=andre.przywara@amd.com \
--cc=juergen.gross@ts.fujitsu.com \
--cc=raistlin@linux.it \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).