xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Dario Faggioli <raistlin@linux.it>
To: xen-devel@lists.xen.org
Cc: Andre Przywara <andre.przywara@amd.com>,
	Ian Campbell <Ian.Campbell@citrix.com>,
	Stefano Stabellini <Stefano.Stabellini@eu.citrix.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Juergen Gross <juergen.gross@ts.fujitsu.com>,
	Ian Jackson <Ian.Jackson@eu.citrix.com>,
	Jan Beulich <JBeulich@suse.com>
Subject: [PATCH 02 of 10 [RFC]] libxl: Generalize libxl_cpumap to just libxl_map
Date: Wed, 11 Apr 2012 15:17:49 +0200	[thread overview]
Message-ID: <3edc8654216a90043310.1334150269@Solace> (raw)
In-Reply-To: <patchbomb.1334150267@Solace>

In preparation for adding a libxl_nodemap and its related
handling logic. No changes to the interface this time.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.eu.com>

diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -277,11 +277,23 @@ typedef uint32_t libxl_hwcap[8];
 
 typedef uint64_t libxl_ev_user;
 
-typedef struct {
+struct libxl_map {
     uint32_t size;          /* number of bytes in map */
     uint8_t *map;
-} libxl_cpumap;
-void libxl_cpumap_dispose(libxl_cpumap *map);
+};
+void libxl_map_dispose(struct libxl_map *map);
+
+typedef struct libxl_map libxl_cpumap;
+static inline void libxl_cpumap_dispose(libxl_cpumap *cpumap)
+{
+    return libxl_map_dispose(cpumap);
+}
+
+typedef struct libxl_map libxl_nodemap;
+static inline void libxl_nodemap_dispose(libxl_nodemap *nodemap)
+{
+    return libxl_map_dispose(nodemap);
+}
 
 typedef struct {
     /*
@@ -474,6 +486,9 @@ int libxl_domain_preserve(libxl_ctx *ctx
 /* get max. number of cpus supported by hypervisor */
 int libxl_get_max_cpus(libxl_ctx *ctx);
 
+/* get max. number of NUMA nodes supported by hypervisor */
+int libxl_get_max_nodes(libxl_ctx *ctx);
+
 /*
  * Run the configured bootloader for a PV domain and update
  * info->kernel, info->u.pv.ramdisk and info->u.pv.cmdline as
diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c
+++ b/tools/libxl/libxl_utils.c
@@ -437,47 +437,53 @@ int libxl_mac_to_device_nic(libxl_ctx *c
     return rc;
 }
 
+void libxl_map_dispose(struct libxl_map *map)
+{
+    free(map->map);
+}
+
+static int libxl_map_alloc(libxl_ctx *ctx, struct libxl_map *map, int n_elems)
+{
+    int sz;
+
+    sz = (n_elems + 7) / 8;
+    map->map = calloc(sz, sizeof(*map->map));
+    if (!map->map)
+        return ERROR_NOMEM;
+    map->size = sz;
+    return 0;
+}
+
+int libxl_map_test(struct libxl_map *map, int elem)
+{
+    if (elem >= map->size * 8)
+        return 0;
+    return (map->map[elem / 8] & (1 << (elem & 7))) ? 1 : 0;
+}
+
+void libxl_map_set(struct libxl_map *map, int elem)
+{
+    if (elem >= map->size * 8)
+        return;
+    map->map[elem / 8] |= 1 << (elem & 7);
+}
+
+void libxl_map_reset(struct libxl_map *map, int elem)
+{
+    if (elem >= map->size * 8)
+        return;
+    map->map[elem / 8] &= ~(1 << (elem & 7));
+}
+
 int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap)
 {
     int max_cpus;
-    int sz;
 
     max_cpus = libxl_get_max_cpus(ctx);
     if (max_cpus == 0)
         return ERROR_FAIL;
 
-    sz = (max_cpus + 7) / 8;
-    cpumap->map = calloc(sz, sizeof(*cpumap->map));
-    if (!cpumap->map)
-        return ERROR_NOMEM;
-    cpumap->size = sz;
-    return 0;
-}
-
-void libxl_cpumap_dispose(libxl_cpumap *map)
-{
-    free(map->map);
-}
-
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
-{
-    if (cpu >= cpumap->size * 8)
-        return 0;
-    return (cpumap->map[cpu / 8] & (1 << (cpu & 7))) ? 1 : 0;
-}
-
-void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
-{
-    if (cpu >= cpumap->size * 8)
-        return;
-    cpumap->map[cpu / 8] |= 1 << (cpu & 7);
-}
-
-void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
-{
-    if (cpu >= cpumap->size * 8)
-        return;
-    cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
+    return libxl_map_alloc(ctx, cpumap, max_cpus);
 }
 
 int libxl_get_max_cpus(libxl_ctx *ctx)
diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h
+++ b/tools/libxl/libxl_utils.h
@@ -64,21 +64,46 @@ int libxl_devid_to_device_nic(libxl_ctx 
 int libxl_vdev_to_device_disk(libxl_ctx *ctx, uint32_t domid, const char *vdev,
                                libxl_device_disk *disk);
 
+int libxl_map_test(struct libxl_map *map, int elem);
+void libxl_map_set(struct libxl_map *map, int elem);
+void libxl_map_reset(struct libxl_map *map, int elem);
+static inline void libxl_map_set_any(struct libxl_map *map)
+{
+    memset(map->map, -1, map->size);
+}
+static inline void libxl_map_set_none(struct libxl_map *map)
+{
+    memset(map->map, 0, map->size);
+}
+static inline int libxl_map_elem_valid(struct libxl_map *map, int elem)
+{
+    return elem >= 0 && elem < (map->size * 8);
+}
+
 int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap);
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
-void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
-void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+static inline int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
+{
+    return libxl_map_test(cpumap, cpu);
+}
+static inline void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
+{
+    libxl_map_set(cpumap, cpu);
+}
+static inline void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
+{
+    libxl_map_reset(cpumap, cpu);
+}
 static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
 {
-    memset(cpumap->map, -1, cpumap->size);
+    libxl_map_set_any(cpumap);
 }
 static inline void libxl_cpumap_set_none(libxl_cpumap *cpumap)
 {
-    memset(cpumap->map, 0, cpumap->size);
+    libxl_map_set_none(cpumap);
 }
 static inline int libxl_cpumap_cpu_valid(libxl_cpumap *cpumap, int cpu)
 {
-    return cpu >= 0 && cpu < (cpumap->size * 8);
+    return libxl_map_elem_valid(cpumap, cpu);
 }
 #define libxl_for_each_cpu(var, map) for (var = 0; var < (map).size * 8; var++)
 #define libxl_for_each_set_cpu(v, m) for (v = 0; v < (m).size * 8; v++) \

  parent reply	other threads:[~2012-04-11 13:17 UTC|newest]

Thread overview: 37+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2012-04-11 13:17 [PATCH 00 of 10 [RFC]] Automatically place guest on host's NUMA nodes with xl Dario Faggioli
2012-04-11 13:17 ` [PATCH 01 of 10 [RFC]] libxc: Generalize xenctl_cpumap to just xenctl_map Dario Faggioli
2012-04-11 16:08   ` George Dunlap
2012-04-11 16:31     ` Dario Faggioli
2012-04-11 16:41       ` Dario Faggioli
2012-04-11 13:17 ` Dario Faggioli [this message]
2012-04-11 13:17 ` [PATCH 03 of 10 [RFC]] libxc, libxl: Introduce xc_nodemap_t and libxl_nodemap Dario Faggioli
2012-04-11 16:38   ` George Dunlap
2012-04-11 16:57     ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 04 of 10 [RFC]] libxl: Introduce libxl_get_numainfo() calling xc_numainfo() Dario Faggioli
2012-04-11 13:17 ` [PATCH 05 of 10 [RFC]] xl: Explicit node affinity specification for guests via config file Dario Faggioli
2012-04-12 10:24   ` George Dunlap
2012-04-12 10:48     ` David Vrabel
2012-04-12 22:25       ` Dario Faggioli
2012-04-12 11:32     ` Formatting of emails which are comments on patches Ian Jackson
2012-04-12 11:42       ` George Dunlap
2012-04-12 22:21     ` [PATCH 05 of 10 [RFC]] xl: Explicit node affinity specification for guests via config file Dario Faggioli
2012-04-11 13:17 ` [PATCH 06 of 10 [RFC]] xl: Allow user to set or change node affinity on-line Dario Faggioli
2012-04-12 10:29   ` George Dunlap
2012-04-12 21:57     ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 07 of 10 [RFC]] sched_credit: Let the scheduler know about `node affinity` Dario Faggioli
2012-04-12 23:06   ` Dario Faggioli
2012-04-27 14:45   ` George Dunlap
2012-05-02 15:13     ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 08 of 10 [RFC]] xl: Introduce First Fit memory-wise placement of guests on nodes Dario Faggioli
2012-05-01 15:45   ` George Dunlap
2012-05-02 16:30     ` Dario Faggioli
2012-05-03  1:03       ` Dario Faggioli
2012-05-03  8:10         ` Ian Campbell
2012-05-03 10:16         ` George Dunlap
2012-05-03 13:41       ` George Dunlap
2012-05-03 14:58         ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 09 of 10 [RFC]] xl: Introduce Best and Worst Fit guest placement algorithms Dario Faggioli
2012-04-16 10:29   ` Dario Faggioli
2012-04-11 13:17 ` [PATCH 10 of 10 [RFC]] xl: Some automatic NUMA placement documentation Dario Faggioli
2012-04-12  9:11   ` Ian Campbell
2012-04-12 10:32     ` Dario Faggioli

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=3edc8654216a90043310.1334150269@Solace \
    --to=raistlin@linux.it \
    --cc=Ian.Campbell@citrix.com \
    --cc=Ian.Jackson@eu.citrix.com \
    --cc=JBeulich@suse.com \
    --cc=Stefano.Stabellini@eu.citrix.com \
    --cc=andre.przywara@amd.com \
    --cc=george.dunlap@eu.citrix.com \
    --cc=juergen.gross@ts.fujitsu.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).