From: George Dunlap <george.dunlap@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: george.dunlap@eu.citrix.com
Subject: [PATCH 3 of 3] xl: Call xlu_cfg_destroy in main_cpupoolcreate
Date: Fri, 11 May 2012 15:12:01 +0100 [thread overview]
Message-ID: <bf9a0309c2ba1bfe5daf.1336745521@kodo2> (raw)
In-Reply-To: <patchbomb.1336745518@kodo2>
This involves introducing a goto-based clean-up path, rather than
returning directly from each error case.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
diff -r 25a5ebd0117f -r bf9a0309c2ba tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c Fri May 11 14:49:22 2012 +0100
+++ b/tools/libxl/xl_cmdimpl.c Fri May 11 15:07:51 2012 +0100
@@ -5677,6 +5677,7 @@ int main_cpupoolcreate(int argc, char **
libxl_cpumap cpumap;
libxl_uuid uuid;
libxl_cputopology *topology;
+ int rc = -ERROR_FAIL;
while (1) {
opt = getopt_long(argc, argv, "hnf:", long_options, &option_index);
@@ -5710,7 +5711,7 @@ int main_cpupoolcreate(int argc, char **
filename = argv[optind];
} else {
help("cpupool-create");
- return -ERROR_FAIL;
+ goto out;
}
optind++;
}
@@ -5721,7 +5722,7 @@ int main_cpupoolcreate(int argc, char **
&config_len)) {
fprintf(stderr, "Failed to read config file: %s: %s\n",
filename, strerror(errno));
- return -ERROR_FAIL;
+ goto out;
}
config_src=filename;
}
@@ -5731,13 +5732,13 @@ int main_cpupoolcreate(int argc, char **
if (strlen(extra_config)) {
if (config_len > INT_MAX - (strlen(extra_config) + 2)) {
fprintf(stderr, "Failed to attach extra configration\n");
- return -ERROR_FAIL;
+ goto out;
}
config_data = xrealloc(config_data,
config_len + strlen(extra_config) + 2);
if (!config_data) {
fprintf(stderr, "Failed to realloc config_data\n");
- return -ERROR_FAIL;
+ goto out;
}
config_data[config_len] = 0;
strcat(config_data, extra_config);
@@ -5748,13 +5749,13 @@ int main_cpupoolcreate(int argc, char **
config = xlu_cfg_init(stderr, config_src);
if (!config) {
fprintf(stderr, "Failed to allocate for configuration\n");
- return -ERROR_FAIL;
+ goto out;
}
ret = xlu_cfg_readdata(config, config_data, config_len);
if (ret) {
fprintf(stderr, "Failed to parse config file: %s\n", strerror(ret));
- return -ERROR_FAIL;
+ goto out_cfg;
}
if (!xlu_cfg_get_string (config, "name", &buf, 0))
@@ -5763,32 +5764,32 @@ int main_cpupoolcreate(int argc, char **
name = libxl_basename(filename);
else {
fprintf(stderr, "Missing cpupool name!\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
if (!libxl_name_to_cpupoolid(ctx, name, &poolid)) {
fprintf(stderr, "Pool name \"%s\" already exists\n", name);
- return -ERROR_FAIL;
+ goto out_cfg;
}
if (!xlu_cfg_get_string (config, "sched", &buf, 0)) {
if ((libxl_scheduler_from_string(buf, &sched)) < 0) {
fprintf(stderr, "Unknown scheduler\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
} else {
if ((sched = libxl_get_scheduler(ctx)) < 0) {
fprintf(stderr, "get_scheduler sysctl failed.\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
}
if (libxl_get_freecpus(ctx, &freemap)) {
fprintf(stderr, "libxl_get_freecpus failed\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
if (libxl_cpumap_alloc(ctx, &cpumap)) {
fprintf(stderr, "Failed to allocate cpumap\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
if (!xlu_cfg_get_list(config, "nodes", &nodes, 0, 0)) {
int nr;
@@ -5797,7 +5798,7 @@ int main_cpupoolcreate(int argc, char **
topology = libxl_get_cpu_topology(ctx, &nr);
if (topology == NULL) {
fprintf(stderr, "libxl_get_topologyinfo failed\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
while ((buf = xlu_cfg_get_listitem(nodes, n_nodes)) != NULL) {
n = atoi(buf);
@@ -5815,7 +5816,7 @@ int main_cpupoolcreate(int argc, char **
if (n_cpus == 0) {
fprintf(stderr, "no free cpu found\n");
- return -ERROR_FAIL;
+ goto out_cfg;
}
} else if (!xlu_cfg_get_list(config, "cpus", &cpus, 0, 0)) {
n_cpus = 0;
@@ -5824,7 +5825,7 @@ int main_cpupoolcreate(int argc, char **
if ((i < 0) || (i >= freemap.size * 8) ||
!libxl_cpumap_test(&freemap, i)) {
fprintf(stderr, "cpu %d illegal or not free\n", i);
- return -ERROR_FAIL;
+ goto out_cfg;
}
libxl_cpumap_set(&cpumap, i);
n_cpus++;
@@ -5839,16 +5840,20 @@ int main_cpupoolcreate(int argc, char **
printf("scheduler: %s\n", libxl_scheduler_to_string(sched));
printf("number of cpus: %d\n", n_cpus);
- if (dryrun_only)
- return 0;
-
- poolid = 0;
- if (libxl_cpupool_create(ctx, name, sched, cpumap, &uuid, &poolid)) {
- fprintf(stderr, "error on creating cpupool\n");
- return -ERROR_FAIL;
- }
-
- return 0;
+ if (!dryrun_only) {
+ poolid = 0;
+ if (libxl_cpupool_create(ctx, name, sched, cpumap, &uuid, &poolid)) {
+ fprintf(stderr, "error on creating cpupool\n");
+ goto out_cfg;
+ }
+ }
+ /* We made it! */
+ rc = 0;
+
+out_cfg:
+ xlu_cfg_destroy(config);
+out:
+ return rc;
}
int main_cpupoollist(int argc, char **argv)
next prev parent reply other threads:[~2012-05-11 14:12 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-05-11 14:11 [PATCH 0 of 3] xl: Some clean-up George Dunlap
2012-05-11 14:11 ` [PATCH 1 of 3] xl: Replace memset with libxl_device_pci_init George Dunlap
2012-05-11 14:12 ` [PATCH 2 of 3] xl: Call xlu_cfg_destroy in the pciattach and pcidetach George Dunlap
2012-05-11 14:12 ` George Dunlap [this message]
2012-05-11 16:34 ` [PATCH 0 of 3] xl: Some clean-up Ian Jackson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=bf9a0309c2ba1bfe5daf.1336745521@kodo2 \
--to=george.dunlap@eu.citrix.com \
--cc=xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).