From: Dario Faggioli <raistlin@linux.it>
To: xen-devel@lists.xen.org
Cc: Andre Przywara <andre.przywara@amd.com>,
Ian Campbell <Ian.Campbell@citrix.com>,
Stefano Stabellini <Stefano.Stabellini@eu.citrix.com>,
George Dunlap <george.dunlap@eu.citrix.com>,
Juergen Gross <juergen.gross@ts.fujitsu.com>,
Ian Jackson <Ian.Jackson@eu.citrix.com>
Subject: [PATCH 05 of 10 v2] libxl: rename libxl_cpumap to libxl_bitmap
Date: Fri, 15 Jun 2012 19:04:33 +0200
Message-ID: <5d3cbf2e6370d1989bcd.1339779873@Solace>
In-Reply-To: <patchbomb.1339779868@Solace>
The caller now bears the burden of knowing and remembering what kind
of bitmap each instance of libxl_bitmap is.
This is basically just s/libxl_cpumap/libxl_bitmap/ (plus some related
interface renames, e.g., libxl_for_each_cpu becoming libxl_for_each_bit)
in a bunch of files, with no real functional change involved.
A specific allocation helper, libxl_cpu_bitmap_alloc(), is introduced
besides libxl_bitmap_alloc(). It is meant to replace the old
libxl_cpumap_alloc(), and is just easier to use when one wants to allocate
a libxl_bitmap that is going to serve as a cpu map. This is because we
want to be able to deal with both cpu and NUMA node maps, but we don't
want to duplicate all the various helpers and wrappers.
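For instance, here is a sketch (not itself part of the patch; error
handling abbreviated, and nr_nodes assumed to come from somewhere else,
e.g., physinfo) of how a caller deals with the renamed interface:

    #include <stdio.h>
    #include "libxl.h"
    #include "libxl_utils.h"

    static int bitmap_example(libxl_ctx *ctx, int nr_nodes)
    {
        libxl_bitmap cpumap, nodemap;
        int i;

        /* cpu map: sized automatically after the host's maximum cpus */
        if (libxl_cpu_bitmap_alloc(ctx, &cpumap))
            return ERROR_FAIL;
        libxl_bitmap_set_none(&cpumap);
        libxl_bitmap_set(&cpumap, 0);           /* e.g., only pcpu 0 */
        libxl_for_each_set_bit(i, cpumap)
            printf("cpu %d is in the map\n", i);
        libxl_bitmap_dispose(&cpumap);

        /* node map: the very same type, just a different bit count */
        if (libxl_bitmap_alloc(ctx, &nodemap, nr_nodes))
            return ERROR_NOMEM;
        libxl_bitmap_dispose(&nodemap);
        return 0;
    }

The point being that nothing but the allocation site needs to know how
many bits a given map spans.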
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Changes from v1:
* this patch replaces "libxl: abstract libxl_cpumap to just libxl_map",
  as it directly changes the name of the old type instead of adding one
  more abstraction layer.
diff --git a/tools/libxl/gentest.py b/tools/libxl/gentest.py
--- a/tools/libxl/gentest.py
+++ b/tools/libxl/gentest.py
@@ -20,7 +20,7 @@ def randomize_case(s):
def randomize_enum(e):
return random.choice([v.name for v in e.values])
-handcoded = ["libxl_cpumap", "libxl_key_value_list",
+handcoded = ["libxl_bitmap", "libxl_key_value_list",
"libxl_cpuid_policy_list", "libxl_string_list"]
def gen_rand_init(ty, v, indent = " ", parent = None):
@@ -117,16 +117,16 @@ static void rand_bytes(uint8_t *p, size_
p[i] = rand() % 256;
}
-static void libxl_cpumap_rand_init(libxl_cpumap *cpumap)
+static void libxl_bitmap_rand_init(libxl_bitmap *bitmap)
{
int i;
- cpumap->size = rand() % 16;
- cpumap->map = calloc(cpumap->size, sizeof(*cpumap->map));
- libxl_for_each_cpu(i, *cpumap) {
+ bitmap->size = rand() % 16;
+ bitmap->map = calloc(bitmap->size, sizeof(*bitmap->map));
+ libxl_for_each_bit(i, *bitmap) {
if (rand() % 2)
- libxl_cpumap_set(cpumap, i);
+ libxl_bitmap_set(bitmap, i);
else
- libxl_cpumap_reset(cpumap, i);
+ libxl_bitmap_reset(bitmap, i);
}
}
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -570,7 +570,7 @@ static int cpupool_info(libxl__gc *gc,
info->poolid = xcinfo->cpupool_id;
info->sched = xcinfo->sched_id;
info->n_dom = xcinfo->n_dom;
- if (libxl_cpumap_alloc(CTX, &info->cpumap))
+ if (libxl_cpu_bitmap_alloc(CTX, &info->cpumap))
goto out;
memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
@@ -3352,7 +3352,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
}
for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
- if (libxl_cpumap_alloc(ctx, &ptr->cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap)) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpumap");
return NULL;
}
@@ -3375,7 +3375,7 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
}
int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
- libxl_cpumap *cpumap)
+ libxl_bitmap *cpumap)
{
if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
@@ -3385,7 +3385,7 @@ int libxl_set_vcpuaffinity(libxl_ctx *ct
}
int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
- unsigned int max_vcpus, libxl_cpumap *cpumap)
+ unsigned int max_vcpus, libxl_bitmap *cpumap)
{
int i, rc = 0;
@@ -3399,7 +3399,7 @@ int libxl_set_vcpuaffinity_all(libxl_ctx
return rc;
}
-int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_cpumap *cpumap)
+int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
{
GC_INIT(ctx);
libxl_dominfo info;
@@ -3419,7 +3419,7 @@ retry_transaction:
for (i = 0; i <= info.vcpu_max_id; i++)
libxl__xs_write(gc, t,
libxl__sprintf(gc, "%s/cpu/%u/availability", dompath, i),
- "%s", libxl_cpumap_test(cpumap, i) ? "online" : "offline");
+ "%s", libxl_bitmap_test(cpumap, i) ? "online" : "offline");
if (!xs_transaction_end(ctx->xsh, t, 0)) {
if (errno == EAGAIN)
goto retry_transaction;
@@ -4015,7 +4015,7 @@ int libxl_tmem_freeable(libxl_ctx *ctx)
return rc;
}
-int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap)
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap)
{
int ncpus;
@@ -4034,7 +4034,7 @@ int libxl_get_freecpus(libxl_ctx *ctx, l
int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
libxl_scheduler sched,
- libxl_cpumap cpumap, libxl_uuid *uuid,
+ libxl_bitmap cpumap, libxl_uuid *uuid,
uint32_t *poolid)
{
GC_INIT(ctx);
@@ -4057,8 +4057,8 @@ int libxl_cpupool_create(libxl_ctx *ctx,
return ERROR_FAIL;
}
- libxl_for_each_cpu(i, cpumap)
- if (libxl_cpumap_test(&cpumap, i)) {
+ libxl_for_each_bit(i, cpumap)
+ if (libxl_bitmap_test(&cpumap, i)) {
rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
if (rc) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
@@ -4093,7 +4093,7 @@ int libxl_cpupool_destroy(libxl_ctx *ctx
int rc, i;
xc_cpupoolinfo_t *info;
xs_transaction_t t;
- libxl_cpumap cpumap;
+ libxl_bitmap cpumap;
info = xc_cpupool_getinfo(ctx->xch, poolid);
if (info == NULL) {
@@ -4106,12 +4106,12 @@ int libxl_cpupool_destroy(libxl_ctx *ctx
goto out;
rc = ERROR_NOMEM;
- if (libxl_cpumap_alloc(ctx, &cpumap))
+ if (libxl_cpu_bitmap_alloc(ctx, &cpumap))
goto out;
memcpy(cpumap.map, info->cpumap, cpumap.size);
- libxl_for_each_cpu(i, cpumap)
- if (libxl_cpumap_test(&cpumap, i)) {
+ libxl_for_each_bit(i, cpumap)
+ if (libxl_bitmap_test(&cpumap, i)) {
rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
if (rc) {
LIBXL__LOG_ERRNOVAL(ctx, LIBXL__LOG_ERROR, rc,
@@ -4140,7 +4140,7 @@ int libxl_cpupool_destroy(libxl_ctx *ctx
rc = 0;
out1:
- libxl_cpumap_dispose(&cpumap);
+ libxl_bitmap_dispose(&cpumap);
out:
xc_cpupool_infofree(ctx->xch, info);
GC_FREE;
@@ -4208,7 +4208,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
{
int rc = 0;
int cpu, nr;
- libxl_cpumap freemap;
+ libxl_bitmap freemap;
libxl_cputopology *topology;
if (libxl_get_freecpus(ctx, &freemap)) {
@@ -4223,7 +4223,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
*cpus = 0;
for (cpu = 0; cpu < nr; cpu++) {
- if (libxl_cpumap_test(&freemap, cpu) && (topology[cpu].node == node) &&
+ if (libxl_bitmap_test(&freemap, cpu) && (topology[cpu].node == node) &&
!libxl_cpupool_cpuadd(ctx, poolid, cpu)) {
(*cpus)++;
}
@@ -4232,7 +4232,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
free(topology);
out:
- libxl_cpumap_dispose(&freemap);
+ libxl_bitmap_dispose(&freemap);
return rc;
}
@@ -4274,7 +4274,7 @@ int libxl_cpupool_cpuremove_node(libxl_c
if (poolinfo[p].poolid == poolid) {
for (cpu = 0; cpu < nr_cpus; cpu++) {
if ((topology[cpu].node == node) &&
- libxl_cpumap_test(&poolinfo[p].cpumap, cpu) &&
+ libxl_bitmap_test(&poolinfo[p].cpumap, cpu) &&
!libxl_cpupool_cpuremove(ctx, poolid, cpu)) {
(*cpus)++;
}
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -285,8 +285,8 @@ typedef uint64_t libxl_ev_user;
typedef struct {
uint32_t size; /* number of bytes in map */
uint8_t *map;
-} libxl_cpumap;
-void libxl_cpumap_dispose(libxl_cpumap *map);
+} libxl_bitmap;
+void libxl_bitmap_dispose(libxl_bitmap *map);
/* libxl_cpuid_policy_list is a dynamic array storing CPUID policies
* for multiple leafs. It is terminated with an entry holding
@@ -783,10 +783,10 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
int *nb_vcpu, int *nrcpus);
void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr);
int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
- libxl_cpumap *cpumap);
+ libxl_bitmap *cpumap);
int libxl_set_vcpuaffinity_all(libxl_ctx *ctx, uint32_t domid,
- unsigned int max_vcpus, libxl_cpumap *cpumap);
-int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_cpumap *cpumap);
+ unsigned int max_vcpus, libxl_bitmap *cpumap);
+int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap);
libxl_scheduler libxl_get_scheduler(libxl_ctx *ctx);
@@ -836,10 +836,10 @@ int libxl_tmem_shared_auth(libxl_ctx *ct
int auth);
int libxl_tmem_freeable(libxl_ctx *ctx);
-int libxl_get_freecpus(libxl_ctx *ctx, libxl_cpumap *cpumap);
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap);
int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
libxl_scheduler sched,
- libxl_cpumap cpumap, libxl_uuid *uuid,
+ libxl_bitmap cpumap, libxl_uuid *uuid,
uint32_t *poolid);
int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid);
int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid);
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -150,9 +150,9 @@ int libxl__domain_build_info_setdefault(
b_info->cur_vcpus = 1;
if (!b_info->cpumap.size) {
- if (libxl_cpumap_alloc(CTX, &b_info->cpumap))
+ if (libxl_cpu_bitmap_alloc(CTX, &b_info->cpumap))
return ERROR_NOMEM;
- libxl_cpumap_set_any(&b_info->cpumap);
+ libxl_bitmap_set_any(&b_info->cpumap);
}
if (b_info->max_memkb == LIBXL_MEMKB_DEFAULT)
diff --git a/tools/libxl/libxl_json.c b/tools/libxl/libxl_json.c
--- a/tools/libxl/libxl_json.c
+++ b/tools/libxl/libxl_json.c
@@ -99,8 +99,8 @@ yajl_gen_status libxl_uuid_gen_json(yajl
return yajl_gen_string(hand, (const unsigned char *)buf, LIBXL_UUID_FMTLEN);
}
-yajl_gen_status libxl_cpumap_gen_json(yajl_gen hand,
- libxl_cpumap *cpumap)
+yajl_gen_status libxl_bitmap_gen_json(yajl_gen hand,
+ libxl_bitmap *cpumap)
{
yajl_gen_status s;
int i;
@@ -108,8 +108,8 @@ yajl_gen_status libxl_cpumap_gen_json(ya
s = yajl_gen_array_open(hand);
if (s != yajl_gen_status_ok) goto out;
- libxl_for_each_cpu(i, *cpumap) {
- if (libxl_cpumap_test(cpumap, i)) {
+ libxl_for_each_bit(i, *cpumap) {
+ if (libxl_bitmap_test(cpumap, i)) {
s = yajl_gen_integer(hand, i);
if (s != yajl_gen_status_ok) goto out;
}
diff --git a/tools/libxl/libxl_json.h b/tools/libxl/libxl_json.h
--- a/tools/libxl/libxl_json.h
+++ b/tools/libxl/libxl_json.h
@@ -26,7 +26,7 @@ yajl_gen_status libxl_defbool_gen_json(y
yajl_gen_status libxl_domid_gen_json(yajl_gen hand, libxl_domid *p);
yajl_gen_status libxl_uuid_gen_json(yajl_gen hand, libxl_uuid *p);
yajl_gen_status libxl_mac_gen_json(yajl_gen hand, libxl_mac *p);
-yajl_gen_status libxl_cpumap_gen_json(yajl_gen hand, libxl_cpumap *p);
+yajl_gen_status libxl_bitmap_gen_json(yajl_gen hand, libxl_bitmap *p);
yajl_gen_status libxl_cpuid_policy_list_gen_json(yajl_gen hand,
libxl_cpuid_policy_list *p);
yajl_gen_status libxl_string_list_gen_json(yajl_gen hand, libxl_string_list *p);
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -10,7 +10,7 @@ libxl_defbool = Builtin("defbool", passb
libxl_domid = Builtin("domid", json_fn = "yajl_gen_integer", autogenerate_json = False)
libxl_uuid = Builtin("uuid", passby=PASS_BY_REFERENCE)
libxl_mac = Builtin("mac", passby=PASS_BY_REFERENCE)
-libxl_cpumap = Builtin("cpumap", dispose_fn="libxl_cpumap_dispose", passby=PASS_BY_REFERENCE)
+libxl_bitmap = Builtin("bitmap", dispose_fn="libxl_bitmap_dispose", passby=PASS_BY_REFERENCE)
libxl_cpuid_policy_list = Builtin("cpuid_policy_list", dispose_fn="libxl_cpuid_dispose", passby=PASS_BY_REFERENCE)
libxl_string_list = Builtin("string_list", dispose_fn="libxl_string_list_dispose", passby=PASS_BY_REFERENCE)
@@ -188,7 +188,7 @@ libxl_cpupoolinfo = Struct("cpupoolinfo"
("poolid", uint32),
("sched", libxl_scheduler),
("n_dom", uint32),
- ("cpumap", libxl_cpumap)
+ ("cpumap", libxl_bitmap)
], dir=DIR_OUT)
libxl_vminfo = Struct("vminfo", [
@@ -238,7 +238,7 @@ libxl_domain_sched_params = Struct("doma
libxl_domain_build_info = Struct("domain_build_info",[
("max_vcpus", integer),
("cur_vcpus", integer),
- ("cpumap", libxl_cpumap),
+ ("cpumap", libxl_bitmap),
("tsc_mode", libxl_tsc_mode),
("max_memkb", MemKB),
("target_memkb", MemKB),
@@ -399,7 +399,7 @@ libxl_vcpuinfo = Struct("vcpuinfo", [
("blocked", bool),
("running", bool),
("vcpu_time", uint64), # total vcpu time ran (ns)
- ("cpumap", libxl_cpumap), # current cpu's affinities
+ ("cpumap", libxl_bitmap), # current cpu's affinities
], dir=DIR_OUT)
libxl_physinfo = Struct("physinfo", [
diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
--- a/tools/libxl/libxl_utils.c
+++ b/tools/libxl/libxl_utils.c
@@ -489,47 +489,42 @@ int libxl_mac_to_device_nic(libxl_ctx *c
return rc;
}
-int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap)
+int libxl_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *bitmap, int n_bits)
{
- int max_cpus;
int sz;
- max_cpus = libxl_get_max_cpus(ctx);
- if (max_cpus == 0)
- return ERROR_FAIL;
-
- sz = (max_cpus + 7) / 8;
- cpumap->map = calloc(sz, sizeof(*cpumap->map));
- if (!cpumap->map)
+ sz = (n_bits + 7) / 8;
+ bitmap->map = calloc(sz, sizeof(*bitmap->map));
+ if (!bitmap->map)
return ERROR_NOMEM;
- cpumap->size = sz;
+ bitmap->size = sz;
return 0;
}
-void libxl_cpumap_dispose(libxl_cpumap *map)
+void libxl_bitmap_dispose(libxl_bitmap *map)
{
free(map->map);
}
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
+int libxl_bitmap_test(const libxl_bitmap *bitmap, int bit)
{
- if (cpu >= cpumap->size * 8)
+ if (bit >= bitmap->size * 8)
return 0;
- return (cpumap->map[cpu / 8] & (1 << (cpu & 7))) ? 1 : 0;
+ return (bitmap->map[bit / 8] & (1 << (bit & 7))) ? 1 : 0;
}
-void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu)
+void libxl_bitmap_set(libxl_bitmap *bitmap, int bit)
{
- if (cpu >= cpumap->size * 8)
+ if (bit >= bitmap->size * 8)
return;
- cpumap->map[cpu / 8] |= 1 << (cpu & 7);
+ bitmap->map[bit / 8] |= 1 << (bit & 7);
}
-void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
+void libxl_bitmap_reset(libxl_bitmap *bitmap, int bit)
{
- if (cpu >= cpumap->size * 8)
+ if (bit >= bitmap->size * 8)
return;
- cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
+ bitmap->map[bit / 8] &= ~(1 << (bit & 7));
}
int libxl_get_max_cpus(libxl_ctx *ctx)
diff --git a/tools/libxl/libxl_utils.h b/tools/libxl/libxl_utils.h
--- a/tools/libxl/libxl_utils.h
+++ b/tools/libxl/libxl_utils.h
@@ -63,25 +63,38 @@ int libxl_devid_to_device_nic(libxl_ctx
int libxl_vdev_to_device_disk(libxl_ctx *ctx, uint32_t domid, const char *vdev,
libxl_device_disk *disk);
-int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap);
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
-void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
-void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
-static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
+int libxl_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *bitmap, int n_bits);
+ /* Allocated bitmap is from malloc, libxl_bitmap_dispose() to be
+ * called by the application when done. */
+int libxl_bitmap_test(const libxl_bitmap *bitmap, int bit);
+void libxl_bitmap_set(libxl_bitmap *bitmap, int bit);
+void libxl_bitmap_reset(libxl_bitmap *bitmap, int bit);
+static inline void libxl_bitmap_set_any(libxl_bitmap *bitmap)
{
- memset(cpumap->map, -1, cpumap->size);
+ memset(bitmap->map, -1, bitmap->size);
}
-static inline void libxl_cpumap_set_none(libxl_cpumap *cpumap)
+static inline void libxl_bitmap_set_none(libxl_bitmap *bitmap)
{
- memset(cpumap->map, 0, cpumap->size);
+ memset(bitmap->map, 0, bitmap->size);
}
-static inline int libxl_cpumap_cpu_valid(libxl_cpumap *cpumap, int cpu)
+static inline int libxl_bitmap_cpu_valid(const libxl_bitmap *bitmap, int bit)
{
- return cpu >= 0 && cpu < (cpumap->size * 8);
+ return bit >= 0 && bit < (bitmap->size * 8);
}
-#define libxl_for_each_cpu(var, map) for (var = 0; var < (map).size * 8; var++)
-#define libxl_for_each_set_cpu(v, m) for (v = 0; v < (m).size * 8; v++) \
- if (libxl_cpumap_test(&(m), v))
+#define libxl_for_each_bit(var, map) for (var = 0; var < (map).size * 8; var++)
+#define libxl_for_each_set_bit(v, m) for (v = 0; v < (m).size * 8; v++) \
+ if (libxl_bitmap_test(&(m), v))
+
+static inline int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap)
+{
+ int max_cpus;
+
+ max_cpus = libxl_get_max_cpus(ctx);
+ if (max_cpus == 0)
+ return ERROR_FAIL;
+
+ return libxl_bitmap_alloc(ctx, cpumap, max_cpus);
+}
static inline uint32_t libxl__sizekb_to_mb(uint32_t s) {
return (s + 1023) / 1024;
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -491,19 +491,19 @@ static void split_string_into_string_lis
free(s);
}
-static int vcpupin_parse(char *cpu, libxl_cpumap *cpumap)
-{
- libxl_cpumap exclude_cpumap;
+static int vcpupin_parse(char *cpu, libxl_bitmap *cpumap)
+{
+ libxl_bitmap exclude_cpumap;
uint32_t cpuida, cpuidb;
char *endptr, *toka, *tokb, *saveptr = NULL;
int i, rc = 0, rmcpu;
if (!strcmp(cpu, "all")) {
- libxl_cpumap_set_any(cpumap);
+ libxl_bitmap_set_any(cpumap);
return 0;
}
- if (libxl_cpumap_alloc(ctx, &exclude_cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &exclude_cpumap)) {
fprintf(stderr, "Error: Failed to allocate cpumap.\n");
return ENOMEM;
}
@@ -533,19 +533,19 @@ static int vcpupin_parse(char *cpu, libx
}
}
while (cpuida <= cpuidb) {
- rmcpu == 0 ? libxl_cpumap_set(cpumap, cpuida) :
- libxl_cpumap_set(&exclude_cpumap, cpuida);
+ rmcpu == 0 ? libxl_bitmap_set(cpumap, cpuida) :
+ libxl_bitmap_set(&exclude_cpumap, cpuida);
cpuida++;
}
}
/* Clear all the cpus from the removal list */
- libxl_for_each_set_cpu(i, exclude_cpumap) {
- libxl_cpumap_reset(cpumap, i);
+ libxl_for_each_set_bit(i, exclude_cpumap) {
+ libxl_bitmap_reset(cpumap, i);
}
vcpp_out:
- libxl_cpumap_dispose(&exclude_cpumap);
+ libxl_bitmap_dispose(&exclude_cpumap);
return rc;
}
@@ -685,7 +685,7 @@ static void parse_config_data(const char
if (!xlu_cfg_get_list (config, "cpus", &cpus, 0, 1)) {
int i, n_cpus = 0;
- if (libxl_cpumap_alloc(ctx, &b_info->cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap)) {
fprintf(stderr, "Unable to allocate cpumap\n");
exit(1);
}
@@ -705,14 +705,14 @@ static void parse_config_data(const char
* the cpumap derived from the list ensures memory is being
* allocated on the proper nodes anyway.
*/
- libxl_cpumap_set_none(&b_info->cpumap);
+ libxl_bitmap_set_none(&b_info->cpumap);
while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
i = atoi(buf);
- if (!libxl_cpumap_cpu_valid(&b_info->cpumap, i)) {
+ if (!libxl_bitmap_cpu_valid(&b_info->cpumap, i)) {
fprintf(stderr, "cpu %d illegal\n", i);
exit(1);
}
- libxl_cpumap_set(&b_info->cpumap, i);
+ libxl_bitmap_set(&b_info->cpumap, i);
if (n_cpus < b_info->max_vcpus)
vcpu_to_pcpu[n_cpus] = i;
n_cpus++;
@@ -721,12 +721,12 @@ static void parse_config_data(const char
else if (!xlu_cfg_get_string (config, "cpus", &buf, 0)) {
char *buf2 = strdup(buf);
- if (libxl_cpumap_alloc(ctx, &b_info->cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &b_info->cpumap)) {
fprintf(stderr, "Unable to allocate cpumap\n");
exit(1);
}
- libxl_cpumap_set_none(&b_info->cpumap);
+ libxl_bitmap_set_none(&b_info->cpumap);
if (vcpupin_parse(buf2, &b_info->cpumap))
exit(1);
free(buf2);
@@ -1806,26 +1806,26 @@ start:
/* If single vcpu to pcpu mapping was requested, honour it */
if (vcpu_to_pcpu) {
- libxl_cpumap vcpu_cpumap;
-
- libxl_cpumap_alloc(ctx, &vcpu_cpumap);
+ libxl_bitmap vcpu_cpumap;
+
+ libxl_cpu_bitmap_alloc(ctx, &vcpu_cpumap);
for (i = 0; i < d_config.b_info.max_vcpus; i++) {
if (vcpu_to_pcpu[i] != -1) {
- libxl_cpumap_set_none(&vcpu_cpumap);
- libxl_cpumap_set(&vcpu_cpumap, vcpu_to_pcpu[i]);
+ libxl_bitmap_set_none(&vcpu_cpumap);
+ libxl_bitmap_set(&vcpu_cpumap, vcpu_to_pcpu[i]);
} else {
- libxl_cpumap_set_any(&vcpu_cpumap);
+ libxl_bitmap_set_any(&vcpu_cpumap);
}
if (libxl_set_vcpuaffinity(ctx, domid, i, &vcpu_cpumap)) {
fprintf(stderr, "setting affinity failed on vcpu `%d'.\n", i);
- libxl_cpumap_dispose(&vcpu_cpumap);
+ libxl_bitmap_dispose(&vcpu_cpumap);
free(vcpu_to_pcpu);
ret = ERROR_FAIL;
goto error_out;
}
}
- libxl_cpumap_dispose(&vcpu_cpumap);
+ libxl_bitmap_dispose(&vcpu_cpumap);
free(vcpu_to_pcpu); vcpu_to_pcpu = NULL;
}
@@ -4063,7 +4063,7 @@ int main_vcpulist(int argc, char **argv)
static void vcpupin(const char *d, const char *vcpu, char *cpu)
{
libxl_vcpuinfo *vcpuinfo;
- libxl_cpumap cpumap;
+ libxl_bitmap cpumap;
uint32_t vcpuid;
char *endptr;
@@ -4080,7 +4080,7 @@ static void vcpupin(const char *d, const
find_domain(d);
- if (libxl_cpumap_alloc(ctx, &cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &cpumap)) {
goto vcpupin_out;
}
@@ -4107,7 +4107,7 @@ static void vcpupin(const char *d, const
libxl_vcpuinfo_list_free(vcpuinfo, nb_vcpu);
}
vcpupin_out1:
- libxl_cpumap_dispose(&cpumap);
+ libxl_bitmap_dispose(&cpumap);
vcpupin_out:
;
}
@@ -4127,7 +4127,7 @@ static void vcpuset(const char *d, const
{
char *endptr;
unsigned int max_vcpus, i;
- libxl_cpumap cpumap;
+ libxl_bitmap cpumap;
max_vcpus = strtoul(nr_vcpus, &endptr, 10);
if (nr_vcpus == endptr) {
@@ -4137,17 +4137,17 @@ static void vcpuset(const char *d, const
find_domain(d);
- if (libxl_cpumap_alloc(ctx, &cpumap)) {
- fprintf(stderr, "libxl_cpumap_alloc failed\n");
+ if (libxl_cpu_bitmap_alloc(ctx, &cpumap)) {
+ fprintf(stderr, "libxl_cpu_bitmap_alloc failed\n");
return;
}
for (i = 0; i < max_vcpus; i++)
- libxl_cpumap_set(&cpumap, i);
+ libxl_bitmap_set(&cpumap, i);
if (libxl_set_vcpuonline(ctx, domid, &cpumap) < 0)
fprintf(stderr, "libxl_set_vcpuonline failed domid=%d max_vcpus=%d\n", domid, max_vcpus);
- libxl_cpumap_dispose(&cpumap);
+ libxl_bitmap_dispose(&cpumap);
}
int main_vcpuset(int argc, char **argv)
@@ -4211,7 +4211,7 @@ static void output_physinfo(void)
libxl_physinfo info;
const libxl_version_info *vinfo;
unsigned int i;
- libxl_cpumap cpumap;
+ libxl_bitmap cpumap;
int n = 0;
if (libxl_get_physinfo(ctx, &info) != 0) {
@@ -4243,8 +4243,8 @@ static void output_physinfo(void)
printf("sharing_used_memory : %"PRIu64"\n", info.sharing_used_frames / i);
}
if (!libxl_get_freecpus(ctx, &cpumap)) {
- libxl_for_each_cpu(i, cpumap)
- if (libxl_cpumap_test(&cpumap, i))
+ libxl_for_each_bit(i, cpumap)
+ if (libxl_bitmap_test(&cpumap, i))
n++;
printf("free_cpus : %d\n", n);
free(cpumap.map);
@@ -5866,8 +5866,8 @@ int main_cpupoolcreate(int argc, char **
XLU_ConfigList *cpus;
XLU_ConfigList *nodes;
int n_cpus, n_nodes, i, n;
- libxl_cpumap freemap;
- libxl_cpumap cpumap;
+ libxl_bitmap freemap;
+ libxl_bitmap cpumap;
libxl_uuid uuid;
libxl_cputopology *topology;
int rc = -ERROR_FAIL;
@@ -5980,7 +5980,7 @@ int main_cpupoolcreate(int argc, char **
fprintf(stderr, "libxl_get_freecpus failed\n");
goto out_cfg;
}
- if (libxl_cpumap_alloc(ctx, &cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &cpumap)) {
fprintf(stderr, "Failed to allocate cpumap\n");
goto out_cfg;
}
@@ -5997,8 +5997,8 @@ int main_cpupoolcreate(int argc, char **
n = atoi(buf);
for (i = 0; i < nr; i++) {
if ((topology[i].node == n) &&
- libxl_cpumap_test(&freemap, i)) {
- libxl_cpumap_set(&cpumap, i);
+ libxl_bitmap_test(&freemap, i)) {
+ libxl_bitmap_set(&cpumap, i);
n_cpus++;
}
}
@@ -6016,11 +6016,11 @@ int main_cpupoolcreate(int argc, char **
while ((buf = xlu_cfg_get_listitem(cpus, n_cpus)) != NULL) {
i = atoi(buf);
if ((i < 0) || (i >= freemap.size * 8) ||
- !libxl_cpumap_test(&freemap, i)) {
+ !libxl_bitmap_test(&freemap, i)) {
fprintf(stderr, "cpu %d illegal or not free\n", i);
goto out_cfg;
}
- libxl_cpumap_set(&cpumap, i);
+ libxl_bitmap_set(&cpumap, i);
n_cpus++;
}
} else
@@ -6118,8 +6118,8 @@ int main_cpupoollist(int argc, char **ar
printf("%-19s", name);
free(name);
n = 0;
- libxl_for_each_cpu(c, poolinfo[p].cpumap)
- if (libxl_cpumap_test(&poolinfo[p].cpumap, c)) {
+ libxl_for_each_bit(c, poolinfo[p].cpumap)
+ if (libxl_bitmap_test(&poolinfo[p].cpumap, c)) {
if (n && opt_cpus) printf(",");
if (opt_cpus) printf("%d", c);
n++;
@@ -6318,7 +6318,7 @@ int main_cpupoolnumasplit(int argc, char
int n_cpus;
char name[16];
libxl_uuid uuid;
- libxl_cpumap cpumap;
+ libxl_bitmap cpumap;
libxl_cpupoolinfo *poolinfo;
libxl_cputopology *topology;
libxl_dominfo info;
@@ -6348,7 +6348,7 @@ int main_cpupoolnumasplit(int argc, char
return -ERROR_FAIL;
}
- if (libxl_cpumap_alloc(ctx, &cpumap)) {
+ if (libxl_cpu_bitmap_alloc(ctx, &cpumap)) {
fprintf(stderr, "Failed to allocate cpumap\n");
libxl_cputopology_list_free(topology, n_cpus);
return -ERROR_FAIL;
@@ -6374,7 +6374,7 @@ int main_cpupoolnumasplit(int argc, char
for (c = 0; c < n_cpus; c++) {
if (topology[c].node == node) {
topology[c].node = LIBXL_CPUTOPOLOGY_INVALID_ENTRY;
- libxl_cpumap_set(&cpumap, n);
+ libxl_bitmap_set(&cpumap, n);
n++;
}
}
@@ -6396,7 +6396,7 @@ int main_cpupoolnumasplit(int argc, char
fprintf(stderr, "failed to offline vcpus\n");
goto out;
}
- libxl_cpumap_set_none(&cpumap);
+ libxl_bitmap_set_none(&cpumap);
for (c = 0; c < n_cpus; c++) {
if (topology[c].node == LIBXL_CPUTOPOLOGY_INVALID_ENTRY) {
@@ -6434,7 +6434,7 @@ int main_cpupoolnumasplit(int argc, char
out:
libxl_cputopology_list_free(topology, n_cpus);
- libxl_cpumap_dispose(&cpumap);
+ libxl_bitmap_dispose(&cpumap);
return ret;
}
diff --git a/tools/python/xen/lowlevel/xl/xl.c b/tools/python/xen/lowlevel/xl/xl.c
--- a/tools/python/xen/lowlevel/xl/xl.c
+++ b/tools/python/xen/lowlevel/xl/xl.c
@@ -231,14 +231,14 @@ int attrib__libxl_cpuid_policy_list_set(
return -1;
}
-int attrib__libxl_cpumap_set(PyObject *v, libxl_cpumap *pptr)
+int attrib__libxl_bitmap_set(PyObject *v, libxl_bitmap *pptr)
{
int i;
long cpu;
for (i = 0; i < PyList_Size(v); i++) {
cpu = PyInt_AsLong(PyList_GetItem(v, i));
- libxl_cpumap_set(pptr, cpu);
+ libxl_bitmap_set(pptr, cpu);
}
return 0;
}
@@ -293,14 +293,14 @@ PyObject *attrib__libxl_cpuid_policy_lis
return NULL;
}
-PyObject *attrib__libxl_cpumap_get(libxl_cpumap *pptr)
+PyObject *attrib__libxl_bitmap_get(libxl_bitmap *pptr)
{
PyObject *cpulist = NULL;
int i;
cpulist = PyList_New(0);
- libxl_for_each_cpu(i, *pptr) {
- if ( libxl_cpumap_test(pptr, i) ) {
+ libxl_for_each_bit(i, *pptr) {
+ if ( libxl_bitmap_test(pptr, i) ) {
PyObject* pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Thread overview: 52+ messages
2012-06-15 17:04 [PATCH 00 of 10 v2] Automatic NUMA placement for xl Dario Faggioli
2012-06-15 17:04 ` [PATCH 01 of 10 v2] libxl: fix a typo in the GCREALLOC_ARRAY macro Dario Faggioli
2012-06-21 8:53 ` Ian Campbell
2012-06-26 16:00 ` Ian Campbell
2012-06-26 16:26 ` Dario Faggioli
2012-06-15 17:04 ` [PATCH 02 of 10 v2] libxl: add a new Array type to the IDL Dario Faggioli
2012-06-15 17:04 ` [PATCH 03 of 10 v2] libxl, libxc: introduce libxl_get_numainfo() Dario Faggioli
2012-06-21 9:02 ` Ian Campbell
2012-06-21 10:00 ` Dario Faggioli
2012-06-21 10:21 ` Ian Campbell
2012-06-15 17:04 ` [PATCH 04 of 10 v2] xl: add more NUMA information to `xl info -n' Dario Faggioli
2012-06-21 9:04 ` Ian Campbell
2012-06-15 17:04 ` Dario Faggioli [this message]
2012-06-21 9:12 ` [PATCH 05 of 10 v2] libxl: rename libxl_cpumap to libxl_bitmap Ian Campbell
2012-06-21 9:49 ` Dario Faggioli
2012-06-21 10:22 ` Ian Campbell
2012-06-15 17:04 ` [PATCH 06 of 10 v2] libxl: expand the libxl_bitmap API a bit Dario Faggioli
2012-06-21 9:30 ` Ian Campbell
2012-06-21 9:46 ` Dario Faggioli
2012-06-15 17:04 ` [PATCH 07 of 10 v2] libxl: introduce some node map helpers Dario Faggioli
2012-06-21 9:35 ` Ian Campbell
2012-06-21 9:44 ` Dario Faggioli
2012-06-15 17:04 ` [PATCH 08 of 10 v2] libxl: enable automatic placement of guests on NUMA nodes Dario Faggioli
2012-06-21 11:40 ` Ian Campbell
2012-06-21 16:34 ` Dario Faggioli
2012-06-22 10:14 ` Ian Campbell
2012-06-26 16:25 ` Dario Faggioli
2012-06-26 16:26 ` Ian Campbell
2012-06-26 17:23 ` Ian Jackson
2012-06-21 16:16 ` George Dunlap
2012-06-21 16:43 ` Dario Faggioli
2012-06-22 10:05 ` George Dunlap
2012-06-26 11:03 ` Ian Jackson
2012-06-26 15:20 ` Dario Faggioli
2012-06-27 8:15 ` Dario Faggioli
2012-06-28 7:25 ` Zhang, Yang Z
2012-06-28 8:36 ` George Dunlap
2012-06-29 5:38 ` Zhang, Yang Z
2012-06-29 9:46 ` Dario Faggioli
2012-06-28 10:12 ` Dario Faggioli
2012-06-28 12:41 ` Pasi Kärkkäinen
2012-06-28 17:03 ` Dario Faggioli
2012-06-29 5:29 ` Zhang, Yang Z
2012-06-29 9:38 ` Dario Faggioli
2012-06-15 17:04 ` [PATCH 09 of 10 v2] libxl: have NUMA placement deal with cpupools Dario Faggioli
2012-06-21 13:31 ` Ian Campbell
2012-06-21 13:54 ` Dario Faggioli
2012-06-21 13:58 ` Ian Campbell
2012-06-15 17:04 ` [PATCH 10 of 10 v2] Some automatic NUMA placement documentation Dario Faggioli
2012-06-18 15:54 ` Dario Faggioli
2012-06-21 13:38 ` Ian Campbell
2012-06-21 13:57 ` Dario Faggioli