From: Dario Faggioli <dario.faggioli@citrix.com>
To: xen-devel@lists.xen.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
Ian Jackson <Ian.Jackson@eu.citrix.com>,
Ian Campbell <Ian.Campbell@citrix.com>
Subject: [RESEND PATCH v5 5/9] libxc/libxl: sanitize error handling in *_get_max_{cpus, nodes}
Date: Sat, 07 Dec 2013 01:05:03 +0100 [thread overview]
Message-ID: <20131207000503.22982.18364.stgit@Solace> (raw)
In-Reply-To: <20131206233642.22982.76947.stgit@Solace>
In libxc, make xc_get_max_{cpus,nodes}() always return either a
positive number or -1, and change all of their callers to cope with
that.

In libxl, make libxl_get_max_{cpus,nodes}() always return either a
positive number or a libxl error code. This also makes it possible
to fix the logging in libxl_{cpu,node}_bitmap_alloc(): it now happens
inside the functions themselves, and reports more accurately what
went wrong.
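To make the new conventions concrete, here is a minimal illustrative
sketch (hypothetical helper functions, not part of this patch) of how
callers of the two layers are expected to check the return values:

    /* Sketch only, not part of the patch. */
    #include <stdio.h>
    #include <xenctrl.h>      /* xc_get_max_cpus() */
    #include <libxl.h>
    #include <libxl_utils.h>  /* libxl_get_max_cpus() */

    /* libxc layer: positive count, or -1 on failure. */
    static int cpumap_bytes(xc_interface *xch)
    {
        int max_cpus = xc_get_max_cpus(xch);

        if (max_cpus < 0)
            return -1;            /* no "0 means error" ambiguity */
        return (max_cpus + 7) / 8;
    }

    /* libxl layer: positive count, or a libxl error code (< 0),
     * which callers should propagate unchanged. */
    static int print_host_cpus(libxl_ctx *ctx)
    {
        int max_cpus = libxl_get_max_cpus(ctx);

        if (max_cpus < 0)
            return max_cpus;      /* already a libxl error code */
        printf("host has %d cpus\n", max_cpus);
        return 0;
    }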
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
---
Changes from v5:
* code motion of libxl_{cpu,node}_bitmap_alloc() happened in the
  previous patch, so this one reworks the functions in their new
  place, as requested during review.
Changes from v4:
* fix error handling in xc_get_max_{cpus,nodes}() as well, as
suggested during review;
* no longer move libxl_{cpu,node}_bitmap_alloc() from .h to .c,
as requested during review;
* add more logging in libxl_{cpu,node}_bitmap_alloc(), as
suggested during review;
* propagate the error from libxl_get_max_{cpus,nodes}, as
suggested during review;
Changes from v3:
* switch the functions to LOG() / LOGE().
* take care of the callers of libxl_get_max_{cpus,nodes}() too.
Changes from v2:
* this wasn't there in v2, but fixing this for v3 was requested
during v2 review.
---
 tools/libxc/xc_misc.c             |   22 ++++++++++++---
 tools/libxl/libxl.c               |   16 ++++-------
 tools/libxl/libxl_utils.c         |   55 +++++++++++++++++++++++++++++--------
 tools/python/xen/lowlevel/xc/xc.c |    6 ++--
 4 files changed, 70 insertions(+), 29 deletions(-)
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 56efe6a..c771469 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -30,9 +30,12 @@ int xc_get_max_cpus(xc_interface *xch)
         return max_cpus;
 
     if ( !xc_physinfo(xch, &physinfo) )
+    {
         max_cpus = physinfo.max_cpu_id + 1;
+        return max_cpus;
+    }
 
-    return max_cpus;
+    return -1;
 }
 
 int xc_get_max_nodes(xc_interface *xch)
@@ -44,19 +47,30 @@ int xc_get_max_nodes(xc_interface *xch)
         return max_nodes;
 
     if ( !xc_physinfo(xch, &physinfo) )
+    {
         max_nodes = physinfo.max_node_id + 1;
+        return max_nodes;
+    }
 
-    return max_nodes;
+    return -1;
 }
 
 int xc_get_cpumap_size(xc_interface *xch)
 {
-    return (xc_get_max_cpus(xch) + 7) / 8;
+    int max_cpus = xc_get_max_cpus(xch);
+
+    if ( max_cpus < 0 )
+        return -1;
+    return (max_cpus + 7) / 8;
 }
 
 int xc_get_nodemap_size(xc_interface *xch)
 {
-    return (xc_get_max_nodes(xch) + 7) / 8;
+    int max_nodes = xc_get_max_nodes(xch);
+
+    if ( max_nodes < 0 )
+        return -1;
+    return (max_nodes + 7) / 8;
 }
 
 xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 2925e1e..fd8b988 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -615,10 +615,8 @@ static int cpupool_info(libxl__gc *gc,
     info->n_dom = xcinfo->n_dom;
     rc = libxl_cpu_bitmap_alloc(CTX, &info->cpumap, 0);
     if (rc)
-    {
-        LOG(ERROR, "unable to allocate cpumap %d\n", rc);
         goto out;
-    }
+
     memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
 
     rc = 0;
@@ -4355,7 +4353,7 @@ libxl_cputopology *libxl_get_cpu_topology(libxl_ctx *ctx, int *nb_cpu_out)
     int max_cpus;
 
     max_cpus = libxl_get_max_cpus(ctx);
-    if (max_cpus == 0)
+    if (max_cpus < 0)
     {
         LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of CPUS");
         ret = NULL;
@@ -4420,7 +4418,7 @@ libxl_numainfo *libxl_get_numainfo(libxl_ctx *ctx, int *nr)
     int i, j, max_nodes;
 
     max_nodes = libxl_get_max_nodes(ctx);
-    if (max_nodes == 0)
+    if (max_nodes < 0)
     {
         LIBXL__LOG(ctx, XTL_ERROR, "Unable to determine number of NODES");
         ret = NULL;
@@ -4545,10 +4543,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
          *nr_vcpus_out <= domaininfo.max_vcpu_id;
          ++*nr_vcpus_out, ++ptr) {
         libxl_bitmap_init(&ptr->cpumap);
-        if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap, 0)) {
-            LOGE(ERROR, "allocating cpumap");
+        if (libxl_cpu_bitmap_alloc(ctx, &ptr->cpumap, 0))
             goto err;
-        }
         if (xc_vcpu_getinfo(ctx->xch, domid, *nr_vcpus_out, &vcpuinfo) == -1) {
             LOGE(ERROR, "getting vcpu info");
             goto err;
@@ -5308,8 +5304,8 @@ int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap)
     int ncpus;
 
     ncpus = libxl_get_max_cpus(ctx);
-    if (ncpus == 0)
-        return ERROR_FAIL;
+    if (ncpus < 0)
+        return ncpus;
 
     cpumap->map = xc_cpupool_freeinfo(ctx->xch);
     if (cpumap->map == NULL)
diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
index 93f7a87..0833de2 100644
--- a/tools/libxl/libxl_utils.c
+++ b/tools/libxl/libxl_utils.c
@@ -653,27 +653,54 @@ char *libxl_bitmap_to_hex_string(libxl_ctx *ctx, const libxl_bitmap *bitmap)
 
 int libxl_cpu_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *cpumap, int max_cpus)
 {
-    if (max_cpus < 0)
-        return ERROR_INVAL;
+    GC_INIT(ctx);
+    int rc = 0;
+
+    if (max_cpus < 0) {
+        rc = ERROR_INVAL;
+        LOG(ERROR, "invalid number of cpus provided");
+        goto out;
+    }
     if (max_cpus == 0)
         max_cpus = libxl_get_max_cpus(ctx);
-    if (max_cpus == 0)
-        return ERROR_FAIL;
+    if (max_cpus < 0) {
+        LOG(ERROR, "failed to retrieve the maximum number of cpus");
+        rc = max_cpus;
+        goto out;
+    }
+    /* This can't fail: no need to check and log */
+    libxl_bitmap_alloc(ctx, cpumap, max_cpus);
 
-    return libxl_bitmap_alloc(ctx, cpumap, max_cpus);
+ out:
+    GC_FREE;
+    return rc;
 }
 
 int libxl_node_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *nodemap,
                             int max_nodes)
 {
-    if (max_nodes < 0)
-        return ERROR_INVAL;
+    GC_INIT(ctx);
+    int rc = 0;
+
+    if (max_nodes < 0) {
+        rc = ERROR_INVAL;
+        LOG(ERROR, "invalid number of nodes provided");
+        goto out;
+    }
+
     if (max_nodes == 0)
         max_nodes = libxl_get_max_nodes(ctx);
-    if (max_nodes == 0)
-        return ERROR_FAIL;
+    if (max_nodes < 0) {
+        LOG(ERROR, "failed to retrieve the maximum number of nodes");
+        rc = max_nodes;
+        goto out;
+    }
+    /* This can't fail: no need to check and log */
+    libxl_bitmap_alloc(ctx, nodemap, max_nodes);
 
-    return libxl_bitmap_alloc(ctx, nodemap, max_nodes);
+ out:
+    GC_FREE;
+    return rc;
 }
 
 int libxl_nodemap_to_cpumap(libxl_ctx *ctx,
@@ -744,12 +771,16 @@ int libxl_cpumap_to_nodemap(libxl_ctx *ctx,
 
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
-    return xc_get_max_cpus(ctx->xch);
+    int max_cpus = xc_get_max_cpus(ctx->xch);
+
+    return max_cpus < 0 ? ERROR_FAIL : max_cpus;
 }
 
 int libxl_get_max_nodes(libxl_ctx *ctx)
 {
-    return xc_get_max_nodes(ctx->xch);
+    int max_nodes = xc_get_max_nodes(ctx->xch);
+
+    return max_nodes < 0 ? ERROR_FAIL : max_nodes;
 }
 
 int libxl__enum_from_string(const libxl_enum_string_table *t,
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 2625fc4..737bdac 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -233,7 +233,7 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
         return NULL;
 
     nr_cpus = xc_get_max_cpus(self->xc_handle);
-    if ( nr_cpus == 0 )
+    if ( nr_cpus < 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
     cpumap = xc_cpumap_alloc(self->xc_handle);
@@ -392,7 +392,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
         return NULL;
 
     nr_cpus = xc_get_max_cpus(self->xc_handle);
-    if ( nr_cpus == 0 )
+    if ( nr_cpus < 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
@@ -1923,7 +1923,7 @@ static PyObject *cpumap_to_cpulist(XcObject *self, xc_cpumap_t cpumap)
     int nr_cpus;
 
     nr_cpus = xc_get_max_cpus(self->xc_handle);
-    if ( nr_cpus == 0 )
+    if ( nr_cpus < 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
     cpulist = PyList_New(0);
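
As a closing illustration of what the reworked allocation and logging
buy us, consider a hypothetical call site (illustration only, not in
this patch): the cause of a failure is now logged inside
libxl_cpu_bitmap_alloc() itself, and the libxl error code travels out
unchanged, so callers can shrink to a bare check:

    /* Hypothetical call site, for illustration only. */
    #include <libxl.h>
    #include <libxl_utils.h>

    static int with_host_cpumap(libxl_ctx *ctx)
    {
        libxl_bitmap cpumap;
        int rc;

        libxl_bitmap_init(&cpumap);
        /* 0 means "size the map for all host cpus"; on failure rc is
         * a libxl error code and the cause has already been logged. */
        rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0);
        if (rc)
            return rc;            /* no caller-side LOG() needed */

        /* ... use cpumap ... */

        libxl_bitmap_dispose(&cpumap);
        return 0;
    }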