From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>,
	wei.liu2@citrix.com, ian.jackson@eu.citrix.com,
	haozhong.zhang@intel.com
Subject: [PATCH 2/9] libxl: carve out cpupool specific functions from libxl.c
Date: Wed,  8 Feb 2017 14:13:39 +0100
Message-ID: <20170208131346.20826-3-jgross@suse.com>
In-Reply-To: <20170208131346.20826-1-jgross@suse.com>

libxl.c has grown to an uncomfortable size. Carve the cpupool-related
functions out of libxl.c into the new file libxl_cpupool.c.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
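
Note for reviewers: the exported cpupool entry points (libxl_cpupool_create,
libxl_list_cpupool, libxl_cpupool_destroy, ...) keep their names and
prototypes; only their home file changes.  For reference, a minimal,
purely illustrative consumer of this interface is sketched below -- the
file name, logger setup and compile line are assumptions for illustration,
not something this series adds or requires:

    /* list_cpupools.c -- illustrative sketch only, not part of this series */
    #include <stdio.h>
    #include <xentoollog.h>
    #include <libxl.h>

    int main(void)
    {
        xentoollog_logger_stdiostream *lg;
        libxl_ctx *ctx = NULL;
        libxl_cpupoolinfo *pools;
        int n_pools, i;

        /* Log errors to stderr; any xentoollog logger would do here. */
        lg = xtl_createlogger_stdiostream(stderr, XTL_ERROR, 0);
        if (!lg)
            return 1;

        if (libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0, (xentoollog_logger *)lg)) {
            fprintf(stderr, "cannot initialise libxl context\n");
            xtl_logger_destroy((xentoollog_logger *)lg);
            return 1;
        }

        /* libxl_list_cpupool() is one of the functions moved by this patch. */
        pools = libxl_list_cpupool(ctx, &n_pools);
        if (pools) {
            for (i = 0; i < n_pools; i++)
                printf("cpupool%u \"%s\": %u domain(s)\n",
                       (unsigned)pools[i].poolid, pools[i].pool_name,
                       (unsigned)pools[i].n_dom);
            libxl_cpupoolinfo_list_free(pools, n_pools);
        }

        libxl_ctx_free(ctx);
        xtl_logger_destroy((xentoollog_logger *)lg);
        return 0;
    }

Built against an installed libxl with something like
"gcc -o list_cpupools list_cpupools.c -lxenlight -lxentoollog" (exact flags
depend on the local setup), this behaves the same before and after the move.
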
 tools/libxl/Makefile        |   1 +
 tools/libxl/libxl.c         | 418 -----------------------------------------
 tools/libxl/libxl_cpupool.c | 443 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 444 insertions(+), 418 deletions(-)
 create mode 100644 tools/libxl/libxl_cpupool.c

diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile
index 91e2f97..0125871 100644
--- a/tools/libxl/Makefile
+++ b/tools/libxl/Makefile
@@ -137,6 +137,7 @@ LIBXL_OBJS = flexarray.o libxl.o libxl_create.o libxl_dm.o libxl_pci.o \
 			libxl_qmp.o libxl_event.o libxl_fork.o \
 			libxl_dom_suspend.o libxl_dom_save.o libxl_usb.o \
 			libxl_vtpm.o libxl_nic.o \
+			libxl_cpupool.o \
                         $(LIBXL_OBJS-y)
 LIBXL_OBJS += libxl_genid.o
 LIBXL_OBJS += _libxl_types.o libxl_flask.o _libxl_types_internal.o
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 0641a8a..53ad081 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -685,100 +685,6 @@ int libxl_domain_info(libxl_ctx *ctx, libxl_dominfo *info_r,
     return 0;
 }
 
-/* Returns:
- *   0 - success
- *   ERROR_FAIL + errno == ENOENT - no entry found
- *   ERROR_$FOO + errno != ENOENT - other failure
- */
-static int cpupool_info(libxl__gc *gc,
-                        libxl_cpupoolinfo *info,
-                        uint32_t poolid,
-                        bool exact /* exactly poolid or >= poolid */)
-{
-    xc_cpupoolinfo_t *xcinfo;
-    int rc = ERROR_FAIL;
-
-    xcinfo = xc_cpupool_getinfo(CTX->xch, poolid);
-    if (xcinfo == NULL)
-    {
-        if (exact || errno != ENOENT)
-            LOGE(ERROR, "failed to get info for cpupool%d", poolid);
-        return ERROR_FAIL;
-    }
-
-    if (exact && xcinfo->cpupool_id != poolid)
-    {
-        LOG(ERROR, "got info for cpupool%d, wanted cpupool%d\n",
-            xcinfo->cpupool_id, poolid);
-        goto out;
-    }
-
-    info->poolid = xcinfo->cpupool_id;
-    info->pool_name = libxl_cpupoolid_to_name(CTX, info->poolid);
-    if (!info->pool_name) {
-        rc = ERROR_FAIL;
-        goto out;
-    }
-    info->sched = xcinfo->sched_id;
-    info->n_dom = xcinfo->n_dom;
-    rc = libxl_cpu_bitmap_alloc(CTX, &info->cpumap, 0);
-    if (rc)
-        goto out;
-
-    memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
-
-    rc = 0;
-out:
-    xc_cpupool_infofree(CTX->xch, xcinfo);
-    return rc;
-}
-
-int libxl_cpupool_info(libxl_ctx *ctx,
-                       libxl_cpupoolinfo *info, uint32_t poolid)
-{
-    GC_INIT(ctx);
-    int rc = cpupool_info(gc, info, poolid, true);
-    GC_FREE;
-    return rc;
-}
-
-libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool_out)
-{
-    GC_INIT(ctx);
-    libxl_cpupoolinfo info, *ptr;
-
-    int i;
-    uint32_t poolid;
-
-    ptr = NULL;
-
-    poolid = 0;
-    for (i = 0;; i++) {
-        libxl_cpupoolinfo_init(&info);
-        if (cpupool_info(gc, &info, poolid, false)) {
-            libxl_cpupoolinfo_dispose(&info);
-            if (errno != ENOENT) goto out;
-            break;
-        }
-
-        ptr = libxl__realloc(NOGC, ptr, (i+1) * sizeof(libxl_cpupoolinfo));
-        ptr[i] = info;
-        poolid = info.poolid + 1;
-        /* Don't dispose of info because it will be returned to caller */
-    }
-
-    *nb_pool_out = i;
-
-    GC_FREE;
-    return ptr;
-
-out:
-    libxl_cpupoolinfo_list_free(ptr, i);
-    *nb_pool_out = 0;
-    GC_FREE;
-    return NULL;
-}
-
 /* this API call only list VM running on this host. A VM can
  * be an aggregate of multiple domains. */
 libxl_vminfo * libxl_list_vm(libxl_ctx *ctx, int *nb_vm_out)
@@ -6256,330 +6162,6 @@ out:
     return rc;
 }
 
-int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap)
-{
-    int ncpus;
-
-    ncpus = libxl_get_max_cpus(ctx);
-    if (ncpus < 0)
-        return ncpus;
-
-    cpumap->map = xc_cpupool_freeinfo(ctx->xch);
-    if (cpumap->map == NULL)
-        return ERROR_FAIL;
-
-    cpumap->size = (ncpus + 7) / 8;
-
-    return 0;
-}
-
-int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
-                         libxl_scheduler sched,
-                         libxl_bitmap cpumap, libxl_uuid *uuid,
-                         uint32_t *poolid)
-{
-    GC_INIT(ctx);
-    int rc;
-    int i;
-    xs_transaction_t t;
-    char *uuid_string;
-
-    uuid_string = libxl__uuid2string(gc, *uuid);
-    if (!uuid_string) {
-        GC_FREE;
-        return ERROR_NOMEM;
-    }
-
-    rc = xc_cpupool_create(ctx->xch, poolid, sched);
-    if (rc) {
-        LOGEV(ERROR, rc, "Could not create cpupool");
-        GC_FREE;
-        return ERROR_FAIL;
-    }
-
-    libxl_for_each_bit(i, cpumap)
-        if (libxl_bitmap_test(&cpumap, i)) {
-            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
-            if (rc) {
-                LOGEV(ERROR, rc, "Error moving cpu to cpupool");
-                libxl_cpupool_destroy(ctx, *poolid);
-                GC_FREE;
-                return ERROR_FAIL;
-            }
-        }
-
-    for (;;) {
-        t = xs_transaction_start(ctx->xsh);
-
-        xs_mkdir(ctx->xsh, t, GCSPRINTF("/local/pool/%d", *poolid));
-        libxl__xs_printf(gc, t,
-                         GCSPRINTF("/local/pool/%d/uuid", *poolid),
-                         "%s", uuid_string);
-        libxl__xs_printf(gc, t,
-                         GCSPRINTF("/local/pool/%d/name", *poolid),
-                         "%s", name);
-
-        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN)) {
-            GC_FREE;
-            return 0;
-        }
-    }
-}
-
-int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid)
-{
-    GC_INIT(ctx);
-    int rc, i;
-    xc_cpupoolinfo_t *info;
-    xs_transaction_t t;
-    libxl_bitmap cpumap;
-
-    info = xc_cpupool_getinfo(ctx->xch, poolid);
-    if (info == NULL) {
-        GC_FREE;
-        return ERROR_NOMEM;
-    }
-
-    rc = ERROR_INVAL;
-    if ((info->cpupool_id != poolid) || (info->n_dom))
-        goto out;
-
-    rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0);
-    if (rc)
-        goto out;
-
-    memcpy(cpumap.map, info->cpumap, cpumap.size);
-    libxl_for_each_bit(i, cpumap)
-        if (libxl_bitmap_test(&cpumap, i)) {
-            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
-            if (rc) {
-                LOGEV(ERROR, rc, "Error removing cpu from cpupool");
-                rc = ERROR_FAIL;
-                goto out1;
-            }
-        }
-
-    rc = xc_cpupool_destroy(ctx->xch, poolid);
-    if (rc) {
-        LOGEV(ERROR, rc, "Could not destroy cpupool");
-        rc = ERROR_FAIL;
-        goto out1;
-    }
-
-    for (;;) {
-        t = xs_transaction_start(ctx->xsh);
-
-        xs_rm(ctx->xsh, XBT_NULL, GCSPRINTF("/local/pool/%d", poolid));
-
-        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
-            break;
-    }
-
-    rc = 0;
-
-out1:
-    libxl_bitmap_dispose(&cpumap);
-out:
-    xc_cpupool_infofree(ctx->xch, info);
-    GC_FREE;
-
-    return rc;
-}
-
-int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid)
-{
-    GC_INIT(ctx);
-    xs_transaction_t t;
-    xc_cpupoolinfo_t *info;
-    int rc;
-
-    info = xc_cpupool_getinfo(ctx->xch, poolid);
-    if (info == NULL) {
-        GC_FREE;
-        return ERROR_NOMEM;
-    }
-
-    rc = ERROR_INVAL;
-    if (info->cpupool_id != poolid)
-        goto out;
-
-    rc = 0;
-
-    for (;;) {
-        t = xs_transaction_start(ctx->xsh);
-
-        libxl__xs_printf(gc, t,
-                         GCSPRINTF("/local/pool/%d/name", poolid),
-                         "%s", name);
-
-        if (xs_transaction_end(ctx->xsh, t, 0))
-            break;
-
-        if (errno == EAGAIN)
-            continue;
-
-        rc = ERROR_FAIL;
-        break;
-    }
-
-out:
-    xc_cpupool_infofree(ctx->xch, info);
-    GC_FREE;
-
-    return rc;
-}
-
-int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
-{
-    GC_INIT(ctx);
-    int rc = 0;
-
-    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
-    if (rc) {
-        LOGE(ERROR, "Error moving cpu %d to cpupool", cpu);
-        rc = ERROR_FAIL;
-    }
-
-    GC_FREE;
-    return rc;
-}
-
-int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
-                                const libxl_bitmap *cpumap)
-{
-    int c, ncpus = 0, rc = 0;
-
-    libxl_for_each_set_bit(c, *cpumap) {
-        if (!libxl_cpupool_cpuadd(ctx, poolid, c))
-            ncpus++;
-    }
-
-    if (ncpus != libxl_bitmap_count_set(cpumap))
-        rc = ERROR_FAIL;
-
-    return rc;
-}
-
-int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
-{
-    int rc = 0;
-    int cpu, nr;
-    libxl_bitmap freemap;
-    libxl_cputopology *topology;
-
-    if (libxl_get_freecpus(ctx, &freemap)) {
-        return ERROR_FAIL;
-    }
-
-    topology = libxl_get_cpu_topology(ctx, &nr);
-    if (!topology) {
-        rc = ERROR_FAIL;
-        goto out;
-    }
-
-    *cpus = 0;
-    for (cpu = 0; cpu < nr; cpu++) {
-        if (libxl_bitmap_test(&freemap, cpu) && (topology[cpu].node == node) &&
-            !libxl_cpupool_cpuadd(ctx, poolid, cpu)) {
-                (*cpus)++;
-        }
-        libxl_cputopology_dispose(&topology[cpu]);
-    }
-
-    free(topology);
-out:
-    libxl_bitmap_dispose(&freemap);
-    return rc;
-}
-
-int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
-{
-    GC_INIT(ctx);
-    int rc = 0;
-
-    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
-    if (rc) {
-        LOGE(ERROR, "Error removing cpu %d from cpupool", cpu);
-        rc = ERROR_FAIL;
-    }
-
-    GC_FREE;
-    return rc;
-}
-
-int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
-                                   const libxl_bitmap *cpumap)
-{
-    int c, ncpus = 0, rc = 0;
-
-    libxl_for_each_set_bit(c, *cpumap) {
-        if (!libxl_cpupool_cpuremove(ctx, poolid, c))
-            ncpus++;
-    }
-
-    if (ncpus != libxl_bitmap_count_set(cpumap))
-        rc = ERROR_FAIL;
-
-    return rc;
-}
-
-int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
-{
-    int ret = 0;
-    int n_pools;
-    int p;
-    int cpu, nr_cpus;
-    libxl_cputopology *topology;
-    libxl_cpupoolinfo *poolinfo;
-
-    poolinfo = libxl_list_cpupool(ctx, &n_pools);
-    if (!poolinfo) {
-        return ERROR_NOMEM;
-    }
-
-    topology = libxl_get_cpu_topology(ctx, &nr_cpus);
-    if (!topology) {
-        ret = ERROR_FAIL;
-        goto out;
-    }
-
-    *cpus = 0;
-    for (p = 0; p < n_pools; p++) {
-        if (poolinfo[p].poolid == poolid) {
-            for (cpu = 0; cpu < nr_cpus; cpu++) {
-                if ((topology[cpu].node == node) &&
-                    libxl_bitmap_test(&poolinfo[p].cpumap, cpu) &&
-                    !libxl_cpupool_cpuremove(ctx, poolid, cpu)) {
-                        (*cpus)++;
-                }
-            }
-        }
-    }
-
-    libxl_cputopology_list_free(topology, nr_cpus);
-
-out:
-    libxl_cpupoolinfo_list_free(poolinfo, n_pools);
-
-    return ret;
-}
-
-int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
-{
-    GC_INIT(ctx);
-    int rc;
-
-    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
-    if (rc) {
-        LOGEVD(ERROR, rc, domid, "Error moving domain to cpupool");
-        GC_FREE;
-        return ERROR_FAIL;
-    }
-
-    GC_FREE;
-    return 0;
-}
-
 static int fd_set_flags(libxl_ctx *ctx, int fd,
                         int fcntlgetop, int fcntlsetop, const char *fl,
                         int flagmask, int set_p)
diff --git a/tools/libxl/libxl_cpupool.c b/tools/libxl/libxl_cpupool.c
new file mode 100644
index 0000000..f3d22b1
--- /dev/null
+++ b/tools/libxl/libxl_cpupool.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2009-2017 Citrix Ltd and other contributors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 only. with the special
+ * exception on linking described in file LICENSE.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ */
+
+#include "libxl_osdeps.h"
+
+#include "libxl_internal.h"
+
+/* Returns:
+ *   0 - success
+ *   ERROR_FAIL + errno == ENOENT - no entry found
+ *   ERROR_$FOO + errno != ENOENT - other failure
+ */
+static int cpupool_info(libxl__gc *gc,
+                        libxl_cpupoolinfo *info,
+                        uint32_t poolid,
+                        bool exact /* exactly poolid or >= poolid */)
+{
+    xc_cpupoolinfo_t *xcinfo;
+    int rc = ERROR_FAIL;
+
+    xcinfo = xc_cpupool_getinfo(CTX->xch, poolid);
+    if (xcinfo == NULL)
+    {
+        if (exact || errno != ENOENT)
+            LOGE(ERROR, "failed to get info for cpupool%d", poolid);
+        return ERROR_FAIL;
+    }
+
+    if (exact && xcinfo->cpupool_id != poolid)
+    {
+        LOG(ERROR, "got info for cpupool%d, wanted cpupool%d\n",
+            xcinfo->cpupool_id, poolid);
+        goto out;
+    }
+
+    info->poolid = xcinfo->cpupool_id;
+    info->pool_name = libxl_cpupoolid_to_name(CTX, info->poolid);
+    if (!info->pool_name) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+    info->sched = xcinfo->sched_id;
+    info->n_dom = xcinfo->n_dom;
+    rc = libxl_cpu_bitmap_alloc(CTX, &info->cpumap, 0);
+    if (rc)
+        goto out;
+
+    memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
+
+    rc = 0;
+out:
+    xc_cpupool_infofree(CTX->xch, xcinfo);
+    return rc;
+}
+
+int libxl_cpupool_info(libxl_ctx *ctx,
+                       libxl_cpupoolinfo *info, uint32_t poolid)
+{
+    GC_INIT(ctx);
+    int rc = cpupool_info(gc, info, poolid, true);
+    GC_FREE;
+    return rc;
+}
+
+libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool_out)
+{
+    GC_INIT(ctx);
+    libxl_cpupoolinfo info, *ptr;
+
+    int i;
+    uint32_t poolid;
+
+    ptr = NULL;
+
+    poolid = 0;
+    for (i = 0;; i++) {
+        libxl_cpupoolinfo_init(&info);
+        if (cpupool_info(gc, &info, poolid, false)) {
+            libxl_cpupoolinfo_dispose(&info);
+            if (errno != ENOENT) goto out;
+            break;
+        }
+
+        ptr = libxl__realloc(NOGC, ptr, (i+1) * sizeof(libxl_cpupoolinfo));
+        ptr[i] = info;
+        poolid = info.poolid + 1;
+        /* Don't dispose of info because it will be returned to caller */
+    }
+
+    *nb_pool_out = i;
+
+    GC_FREE;
+    return ptr;
+
+out:
+    libxl_cpupoolinfo_list_free(ptr, i);
+    *nb_pool_out = 0;
+    GC_FREE;
+    return NULL;
+}
+
+int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap)
+{
+    int ncpus;
+
+    ncpus = libxl_get_max_cpus(ctx);
+    if (ncpus < 0)
+        return ncpus;
+
+    cpumap->map = xc_cpupool_freeinfo(ctx->xch);
+    if (cpumap->map == NULL)
+        return ERROR_FAIL;
+
+    cpumap->size = (ncpus + 7) / 8;
+
+    return 0;
+}
+
+int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
+                         libxl_scheduler sched,
+                         libxl_bitmap cpumap, libxl_uuid *uuid,
+                         uint32_t *poolid)
+{
+    GC_INIT(ctx);
+    int rc;
+    int i;
+    xs_transaction_t t;
+    char *uuid_string;
+
+    uuid_string = libxl__uuid2string(gc, *uuid);
+    if (!uuid_string) {
+        GC_FREE;
+        return ERROR_NOMEM;
+    }
+
+    rc = xc_cpupool_create(ctx->xch, poolid, sched);
+    if (rc) {
+        LOGEV(ERROR, rc, "Could not create cpupool");
+        GC_FREE;
+        return ERROR_FAIL;
+    }
+
+    libxl_for_each_bit(i, cpumap)
+        if (libxl_bitmap_test(&cpumap, i)) {
+            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
+            if (rc) {
+                LOGEV(ERROR, rc, "Error moving cpu to cpupool");
+                libxl_cpupool_destroy(ctx, *poolid);
+                GC_FREE;
+                return ERROR_FAIL;
+            }
+        }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_mkdir(ctx->xsh, t, GCSPRINTF("/local/pool/%d", *poolid));
+        libxl__xs_printf(gc, t,
+                         GCSPRINTF("/local/pool/%d/uuid", *poolid),
+                         "%s", uuid_string);
+        libxl__xs_printf(gc, t,
+                         GCSPRINTF("/local/pool/%d/name", *poolid),
+                         "%s", name);
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN)) {
+            GC_FREE;
+            return 0;
+        }
+    }
+}
+
+int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid)
+{
+    GC_INIT(ctx);
+    int rc, i;
+    xc_cpupoolinfo_t *info;
+    xs_transaction_t t;
+    libxl_bitmap cpumap;
+
+    info = xc_cpupool_getinfo(ctx->xch, poolid);
+    if (info == NULL) {
+        GC_FREE;
+        return ERROR_NOMEM;
+    }
+
+    rc = ERROR_INVAL;
+    if ((info->cpupool_id != poolid) || (info->n_dom))
+        goto out;
+
+    rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0);
+    if (rc)
+        goto out;
+
+    memcpy(cpumap.map, info->cpumap, cpumap.size);
+    libxl_for_each_bit(i, cpumap)
+        if (libxl_bitmap_test(&cpumap, i)) {
+            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
+            if (rc) {
+                LOGEV(ERROR, rc, "Error removing cpu from cpupool");
+                rc = ERROR_FAIL;
+                goto out1;
+            }
+        }
+
+    rc = xc_cpupool_destroy(ctx->xch, poolid);
+    if (rc) {
+        LOGEV(ERROR, rc, "Could not destroy cpupool");
+        rc = ERROR_FAIL;
+        goto out1;
+    }
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        xs_rm(ctx->xsh, XBT_NULL, GCSPRINTF("/local/pool/%d", poolid));
+
+        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
+            break;
+    }
+
+    rc = 0;
+
+out1:
+    libxl_bitmap_dispose(&cpumap);
+out:
+    xc_cpupool_infofree(ctx->xch, info);
+    GC_FREE;
+
+    return rc;
+}
+
+int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid)
+{
+    GC_INIT(ctx);
+    xs_transaction_t t;
+    xc_cpupoolinfo_t *info;
+    int rc;
+
+    info = xc_cpupool_getinfo(ctx->xch, poolid);
+    if (info == NULL) {
+        GC_FREE;
+        return ERROR_NOMEM;
+    }
+
+    rc = ERROR_INVAL;
+    if (info->cpupool_id != poolid)
+        goto out;
+
+    rc = 0;
+
+    for (;;) {
+        t = xs_transaction_start(ctx->xsh);
+
+        libxl__xs_printf(gc, t,
+                         GCSPRINTF("/local/pool/%d/name", poolid),
+                         "%s", name);
+
+        if (xs_transaction_end(ctx->xsh, t, 0))
+            break;
+
+        if (errno == EAGAIN)
+            continue;
+
+        rc = ERROR_FAIL;
+        break;
+    }
+
+out:
+    xc_cpupool_infofree(ctx->xch, info);
+    GC_FREE;
+
+    return rc;
+}
+
+int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    GC_INIT(ctx);
+    int rc = 0;
+
+    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LOGE(ERROR, "Error moving cpu %d to cpupool", cpu);
+        rc = ERROR_FAIL;
+    }
+
+    GC_FREE;
+    return rc;
+}
+
+int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
+                                const libxl_bitmap *cpumap)
+{
+    int c, ncpus = 0, rc = 0;
+
+    libxl_for_each_set_bit(c, *cpumap) {
+        if (!libxl_cpupool_cpuadd(ctx, poolid, c))
+            ncpus++;
+    }
+
+    if (ncpus != libxl_bitmap_count_set(cpumap))
+        rc = ERROR_FAIL;
+
+    return rc;
+}
+
+int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
+{
+    int rc = 0;
+    int cpu, nr;
+    libxl_bitmap freemap;
+    libxl_cputopology *topology;
+
+    if (libxl_get_freecpus(ctx, &freemap)) {
+        return ERROR_FAIL;
+    }
+
+    topology = libxl_get_cpu_topology(ctx, &nr);
+    if (!topology) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    *cpus = 0;
+    for (cpu = 0; cpu < nr; cpu++) {
+        if (libxl_bitmap_test(&freemap, cpu) && (topology[cpu].node == node) &&
+            !libxl_cpupool_cpuadd(ctx, poolid, cpu)) {
+                (*cpus)++;
+        }
+        libxl_cputopology_dispose(&topology[cpu]);
+    }
+
+    free(topology);
+out:
+    libxl_bitmap_dispose(&freemap);
+    return rc;
+}
+
+int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
+{
+    GC_INIT(ctx);
+    int rc = 0;
+
+    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
+    if (rc) {
+        LOGE(ERROR, "Error removing cpu %d from cpupool", cpu);
+        rc = ERROR_FAIL;
+    }
+
+    GC_FREE;
+    return rc;
+}
+
+int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
+                                   const libxl_bitmap *cpumap)
+{
+    int c, ncpus = 0, rc = 0;
+
+    libxl_for_each_set_bit(c, *cpumap) {
+        if (!libxl_cpupool_cpuremove(ctx, poolid, c))
+            ncpus++;
+    }
+
+    if (ncpus != libxl_bitmap_count_set(cpumap))
+        rc = ERROR_FAIL;
+
+    return rc;
+}
+
+int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
+{
+    int ret = 0;
+    int n_pools;
+    int p;
+    int cpu, nr_cpus;
+    libxl_cputopology *topology;
+    libxl_cpupoolinfo *poolinfo;
+
+    poolinfo = libxl_list_cpupool(ctx, &n_pools);
+    if (!poolinfo) {
+        return ERROR_NOMEM;
+    }
+
+    topology = libxl_get_cpu_topology(ctx, &nr_cpus);
+    if (!topology) {
+        ret = ERROR_FAIL;
+        goto out;
+    }
+
+    *cpus = 0;
+    for (p = 0; p < n_pools; p++) {
+        if (poolinfo[p].poolid == poolid) {
+            for (cpu = 0; cpu < nr_cpus; cpu++) {
+                if ((topology[cpu].node == node) &&
+                    libxl_bitmap_test(&poolinfo[p].cpumap, cpu) &&
+                    !libxl_cpupool_cpuremove(ctx, poolid, cpu)) {
+                        (*cpus)++;
+                }
+            }
+        }
+    }
+
+    libxl_cputopology_list_free(topology, nr_cpus);
+
+out:
+    libxl_cpupoolinfo_list_free(poolinfo, n_pools);
+
+    return ret;
+}
+
+int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
+{
+    GC_INIT(ctx);
+    int rc;
+
+    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
+    if (rc) {
+        LOGEVD(ERROR, rc, domid, "Error moving domain to cpupool");
+        GC_FREE;
+        return ERROR_FAIL;
+    }
+
+    GC_FREE;
+    return 0;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
2.10.2


Thread overview: 19+ messages
2017-02-08 13:13 [PATCH 0/9] libxl: spolit up libxl.c Juergen Gross
2017-02-08 13:13 ` [PATCH 1/9] libxl: adjust copyright comment of libxl.c Juergen Gross
2017-02-08 13:13 ` Juergen Gross [this message]
2017-02-08 13:13 ` [PATCH 3/9] libxl: carve out scheduler specific functions from libxl.c Juergen Gross
2017-02-08 13:13 ` [PATCH 4/9] libxl: carve out disk " Juergen Gross
2017-02-08 13:13 ` [PATCH 5/9] libxl: carve out console " Juergen Gross
2017-02-08 13:13 ` [PATCH 6/9] libxl: carve out memory " Juergen Gross
2017-02-08 13:13 ` [PATCH 7/9] libxl: move device specific functions out of libxl.c Juergen Gross
2017-02-08 13:13 ` [PATCH 8/9] libxl: carve out tmem specific functions from libxl.c Juergen Gross
2017-02-08 13:13 ` [PATCH 9/9] libxl: carve out domain " Juergen Gross
2017-02-08 14:33 ` [PATCH 0/9] libxl: spolit up libxl.c Ian Jackson
2017-02-08 14:54   ` Juergen Gross
2017-02-08 16:12     ` Ian Jackson
2017-02-08 16:28       ` Juergen Gross
2017-02-08 14:35 ` Wei Liu
2017-02-08 16:54   ` Wei Liu
2017-02-09  1:51     ` Zhang Chen
2017-02-09  9:36       ` Wei Liu
2017-02-09  1:12   ` Haozhong Zhang
