From: Harmandeep Kaur <write.harmandeep@gmail.com>
To: xen-devel@lists.xenproject.org
Cc: lars.kurth@citrix.com, wei.liu2@citrix.com,
ian.campbell@citrix.com, stefano.stabellini@eu.citrix.com,
dario.faggioli@citrix.com, ian.jackson@eu.citrix.com,
george.dunlap@citrix.com,
Harmandeep Kaur <write.harmandeep@gmail.com>
Subject: [PATCH v3 4/5] xl: improve return and exit codes of cpupool related functions
Date: Tue, 27 Oct 2015 15:20:51 +0530
Message-ID: <1445939452-8091-5-git-send-email-write.harmandeep@gmail.com>
In-Reply-To: <1445939452-8091-1-git-send-email-write.harmandeep@gmail.com>
Turn the exit codes of the cpupool related functions towards using the
EXIT_[SUCCESS|FAILURE] constants, instead of arbitrary numbers or libxl
return codes.
Signed-off-by: Harmandeep Kaur <write.harmandeep@gmail.com>
---
v2->v3: Edited changelog.
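For reviewers skimming the diff, the shape every converted function follows
is sketched below. This is a minimal illustration only, not code from the
patch; main_cpupoolexample() and its argument check are made up:

    #include <stdlib.h>   /* EXIT_SUCCESS / EXIT_FAILURE */
    #include <stdio.h>

    /* Hypothetical xl command handler, illustrating the convention. */
    int main_cpupoolexample(int argc, char **argv)
    {
        int rc = EXIT_FAILURE;      /* pessimistic default, not a bare 1 */

        if (argc < 2) {
            fprintf(stderr, "missing cpupool argument\n");
            goto out;               /* error paths keep rc == EXIT_FAILURE */
        }

        /* ... the actual libxl work would happen here ... */

        rc = EXIT_SUCCESS;          /* set only once everything succeeded */
     out:
        return rc;
    }

The point is that the value handed back to the shell is always one of the
two standard constants, never a raw libxl error code, whose negative values
would be mangled when truncated to an 8-bit exit status.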
tools/libxl/xl_cmdimpl.c | 52 ++++++++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 1dae28f..76d9af6 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -7314,7 +7314,7 @@ int main_cpupoolcreate(int argc, char **argv)
libxl_bitmap cpumap;
libxl_uuid uuid;
libxl_cputopology *topology;
- int rc = 1;
+ int rc = EXIT_FAILURE;
SWITCH_FOREACH_OPT(opt, "nf:", opts, "cpupool-create", 0) {
case 'f':
@@ -7484,7 +7484,7 @@ int main_cpupoolcreate(int argc, char **argv)
}
}
/* We made it! */
- rc = 0;
+ rc = EXIT_SUCCESS;
out_cfg:
xlu_cfg_destroy(config);
@@ -7521,14 +7521,14 @@ int main_cpupoollist(int argc, char **argv)
pool = argv[optind];
if (libxl_name_to_cpupoolid(ctx, pool, &poolid)) {
fprintf(stderr, "Pool \'%s\' does not exist\n", pool);
- return 1;
+ return EXIT_FAILURE;
}
}
poolinfo = libxl_list_cpupool(ctx, &n_pools);
if (!poolinfo) {
fprintf(stderr, "error getting cpupool info\n");
- return 1;
+ return EXIT_FAILURE;
}
printf("%-19s", "Name");
@@ -7559,7 +7559,7 @@ int main_cpupoollist(int argc, char **argv)
libxl_cpupoolinfo_list_free(poolinfo, n_pools);
- return 0;
+ return EXIT_SUCCESS;
}
int main_cpupooldestroy(int argc, char **argv)
@@ -7577,13 +7577,13 @@ int main_cpupooldestroy(int argc, char **argv)
if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
!libxl_cpupoolid_is_valid(ctx, poolid)) {
fprintf(stderr, "unknown cpupool '%s'\n", pool);
- return 1;
+ return EXIT_FAILURE;
}
if (libxl_cpupool_destroy(ctx, poolid))
- return 1;
+ return EXIT_FAILURE;
- return 0;
+ return EXIT_SUCCESS;
}
int main_cpupoolrename(int argc, char **argv)
@@ -7602,17 +7602,17 @@ int main_cpupoolrename(int argc, char **argv)
if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
!libxl_cpupoolid_is_valid(ctx, poolid)) {
fprintf(stderr, "unknown cpupool '%s'\n", pool);
- return 1;
+ return EXIT_FAILURE;
}
new_name = argv[optind];
if (libxl_cpupool_rename(ctx, new_name, poolid)) {
fprintf(stderr, "Can't rename cpupool '%s'\n", pool);
- return 1;
+ return EXIT_FAILURE;
}
- return 0;
+ return EXIT_SUCCESS;
}
int main_cpupoolcpuadd(int argc, char **argv)
@@ -7621,7 +7621,7 @@ int main_cpupoolcpuadd(int argc, char **argv)
const char *pool;
uint32_t poolid;
libxl_bitmap cpumap;
- int rc = 1;
+ int rc = EXIT_FAILURE;
SWITCH_FOREACH_OPT(opt, "", NULL, "cpupool-cpu-add", 2) {
/* No options */
@@ -7630,7 +7630,7 @@ int main_cpupoolcpuadd(int argc, char **argv)
libxl_bitmap_init(&cpumap);
if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
fprintf(stderr, "Unable to allocate cpumap");
- return 1;
+ return EXIT_FAILURE;
}
pool = argv[optind++];
@@ -7646,7 +7646,7 @@ int main_cpupoolcpuadd(int argc, char **argv)
if (libxl_cpupool_cpuadd_cpumap(ctx, poolid, &cpumap))
fprintf(stderr, "some cpus may not have been added to %s\n", pool);
- rc = 0;
+ rc = EXIT_SUCCESS;
out:
libxl_bitmap_dispose(&cpumap);
@@ -7659,12 +7659,12 @@ int main_cpupoolcpuremove(int argc, char **argv)
const char *pool;
uint32_t poolid;
libxl_bitmap cpumap;
- int rc = 1;
+ int rc = EXIT_FAILURE;
libxl_bitmap_init(&cpumap);
if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
fprintf(stderr, "Unable to allocate cpumap");
- return 1;
+ return EXIT_FAILURE;
}
SWITCH_FOREACH_OPT(opt, "", NULL, "cpupool-cpu-remove", 2) {
@@ -7684,7 +7684,7 @@ int main_cpupoolcpuremove(int argc, char **argv)
if (libxl_cpupool_cpuremove_cpumap(ctx, poolid, &cpumap))
fprintf(stderr, "some cpus may not have been removed from %s\n", pool);
- rc = 0;
+ rc = EXIT_SUCCESS;
out:
libxl_bitmap_dispose(&cpumap);
@@ -7709,19 +7709,19 @@ int main_cpupoolmigrate(int argc, char **argv)
if (libxl_domain_qualifier_to_domid(ctx, dom, &domid) ||
!libxl_domid_to_name(ctx, domid)) {
fprintf(stderr, "unknown domain '%s'\n", dom);
- return 1;
+ return EXIT_FAILURE;
}
if (libxl_cpupool_qualifier_to_cpupoolid(ctx, pool, &poolid, NULL) ||
!libxl_cpupoolid_is_valid(ctx, poolid)) {
fprintf(stderr, "unknown cpupool '%s'\n", pool);
- return 1;
+ return EXIT_FAILURE;
}
if (libxl_cpupool_movedomain(ctx, poolid, domid))
- return 1;
+ return EXIT_FAILURE;
- return 0;
+ return EXIT_SUCCESS;
}
int main_cpupoolnumasplit(int argc, char **argv)
@@ -7749,13 +7749,13 @@ int main_cpupoolnumasplit(int argc, char **argv)
libxl_dominfo_init(&info);
- rc = 1;
+ rc = EXIT_FAILURE;
libxl_bitmap_init(&cpumap);
poolinfo = libxl_list_cpupool(ctx, &n_pools);
if (!poolinfo) {
fprintf(stderr, "error getting cpupool info\n");
- return 1;
+ return EXIT_FAILURE;
}
poolid = poolinfo[0].poolid;
sched = poolinfo[0].sched;
@@ -7763,13 +7763,13 @@ int main_cpupoolnumasplit(int argc, char **argv)
if (n_pools > 1) {
fprintf(stderr, "splitting not possible, already cpupools in use\n");
- return 1;
+ return EXIT_FAILURE;
}
topology = libxl_get_cpu_topology(ctx, &n_cpus);
if (topology == NULL) {
fprintf(stderr, "libxl_get_topologyinfo failed\n");
- return 1;
+ return EXIT_FAILURE;
}
if (libxl_cpu_bitmap_alloc(ctx, &cpumap, 0)) {
@@ -7858,7 +7858,7 @@ int main_cpupoolnumasplit(int argc, char **argv)
}
}
- rc = 0;
+ rc = EXIT_SUCCESS;
out:
libxl_cputopology_list_free(topology, n_cpus);
--
1.9.1
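As a usage note (not part of the patch): once xl reports EXIT_SUCCESS and
EXIT_FAILURE consistently, callers can test the status with the standard
wait macros. A hedged sketch; run_xl_cpupool_destroy() is an invented
helper, not an xl or libxl API:

    #include <stdlib.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/wait.h>

    /* Invented helper: run `xl cpupool-destroy <pool>` and report
     * success iff the child exited with EXIT_SUCCESS. */
    static int run_xl_cpupool_destroy(const char *pool)
    {
        pid_t pid = fork();
        if (pid < 0)
            return -1;
        if (pid == 0) {
            execlp("xl", "xl", "cpupool-destroy", pool, (char *)NULL);
            _exit(127);             /* exec failed */
        }
        int status;
        if (waitpid(pid, &status, 0) < 0)
            return -1;
        return (WIFEXITED(status) &&
                WEXITSTATUS(status) == EXIT_SUCCESS) ? 0 : -1;
    }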
Thread overview: 12+ messages
2015-10-27 9:50 [PATCH v3 0/5] xl: convert exit codes to EXIT_[SUCCESS|FAILURE] Harmandeep Kaur
2015-10-27 9:50 ` [PATCH v3 1/5] xl: convert main() " Harmandeep Kaur
2015-10-27 11:11 ` Wei Liu
2015-10-27 11:20 ` Dario Faggioli
2015-10-27 9:50 ` [PATCH v3 2/5] xl: improve return and exit codes of scheduling related functions Harmandeep Kaur
2015-10-27 11:25 ` Dario Faggioli
2015-10-27 9:50 ` [PATCH v3 3/5] xl: improve return and exit codes of vcpu " Harmandeep Kaur
2015-10-27 11:27 ` Dario Faggioli
2015-10-27 9:50 ` Harmandeep Kaur [this message]
2015-10-27 11:29 ` [PATCH v3 4/5] xl: improve return and exit codes of cpupool " Dario Faggioli
2015-10-27 9:50 ` [PATCH v3 5/5] xl: improve return and exit codes of parse " Harmandeep Kaur
2015-10-27 11:41 ` Dario Faggioli