From: Lee Schermerhorn <lee.schermerhorn@hp.com>
To: Cliff Wickman <cpi@sgi.com>
Cc: Kornilios Kourtis <kkourt@cslab.ece.ntua.gr>,
Brice Goglin <Brice.Goglin@inria.fr>,
linux-numa@vger.kernel.org, eric.whitney@hp.com
Subject: [PATCH 3/8] numactl/libnuma - more bitmask leak fixes
Date: Tue, 28 Apr 2009 12:36:38 -0400
Message-ID: <20090428163638.24945.53155.sendpatchset@localhost.localdomain>
In-Reply-To: <20090428163621.24945.95516.sendpatchset@localhost.localdomain>
[PATCH 03/08] - reorganize code to plug apparent bitmask leaks
Against: numactl-2.0.3-rc2
The remainder of the fixes for apparent bitmask leaks. These required a bit
more reorganization to address than the simple fixes in patch 2/8.
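For reference, a minimal sketch (not part of the patch) of the pattern the
hunks below apply in the v1 compatibility paths: copy the bits out of the
heap-allocated struct bitmask into a caller-visible nodemask_t and free the
bitmask before returning, rather than returning a pointer alias into the
bitmask and leaking it. example_get_mask() is a hypothetical stand-in; the
fill step is elided (in libnuma.c it is getpol()/numa_sched_getaffinity()),
and the public numa_allocate_nodemask() stands in for the internal
allocate_nodemask_v1() helper:

        #include <numa.h>

        static nodemask_t example_get_mask(void)
        {
                nodemask_t nmp;
                struct bitmask *bmp = numa_allocate_nodemask();

                /* ... fill bmp here, e.g. via getpol() in libnuma.c ... */

                /* copy the body of the map out of the bitmask ... */
                copy_bitmask_to_nodemask(bmp, &nmp);

                /* ... so the bitmask can be freed before returning */
                numa_bitmask_free(bmp);

                return nmp;     /* returned by value; nothing left to free */
        }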
libnuma.c | 75 ++++++++++++++++++++++++++++++++++++++------------------------
1 files changed, 47 insertions(+), 28 deletions(-)
Index: numactl-2.0.3-rc2/libnuma.c
===================================================================
--- numactl-2.0.3-rc2.orig/libnuma.c 2009-04-27 09:44:52.000000000 -0400
+++ numactl-2.0.3-rc2/libnuma.c 2009-04-27 16:49:39.000000000 -0400
@@ -1031,17 +1031,19 @@ numa_get_membind_v1(void)
{
int oldpolicy;
struct bitmask *bmp;
- nodemask_t *nmp;
+ nodemask_t nmp;
bmp = allocate_nodemask_v1();
getpol(&oldpolicy, bmp);
if (oldpolicy == MPOL_BIND) {
- nmp = (nodemask_t *)bmp->maskp;
- return *nmp;
+ copy_bitmask_to_nodemask(bmp, &nmp);
+ } else {
+ /* copy the body of the map to numa_all_nodes */
+ copy_bitmask_to_nodemask(bmp, &numa_all_nodes);
+ nmp = numa_all_nodes;
}
- /* copy the body of the map to numa_all_nodes */
- copy_bitmask_to_nodemask(bmp, &numa_all_nodes);
- return numa_all_nodes;
+ numa_bitmask_free(bmp);
+ return nmp;
}
__asm__(".symver numa_get_membind_v1,numa_get_membind@libnuma_1.1");
@@ -1411,14 +1413,16 @@ numa_get_run_node_mask_v1(void)
int i, k;
int max = numa_max_node_int();
struct bitmask *bmp, *cpus, *nodecpus;
- nodemask_t *nmp;
+ nodemask_t nmp;
- bmp = allocate_nodemask_v1(); /* the size of a nodemask_t */
cpus = numa_allocate_cpumask();
- nodecpus = numa_allocate_cpumask();
- if (numa_sched_getaffinity_v2_int(0, cpus) < 0)
- return numa_no_nodes;
+ if (numa_sched_getaffinity_v2_int(0, cpus) < 0){
+ nmp = numa_no_nodes;
+ goto free_cpus;
+ }
+ nodecpus = numa_allocate_cpumask();
+ bmp = allocate_nodemask_v1(); /* the size of a nodemask_t */
for (i = 0; i <= max; i++) {
if (numa_node_to_cpus_v2_int(i, nodecpus) < 0) {
/* It's possible for the node to not exist */
@@ -1429,8 +1433,12 @@ numa_get_run_node_mask_v1(void)
numa_bitmask_setbit(bmp, i);
}
}
- nmp = (nodemask_t *)bmp->maskp;
- return *nmp;
+ copy_bitmask_to_nodemask(bmp, &nmp);
+ numa_bitmask_free(bmp);
+ numa_bitmask_free(nodecpus);
+free_cpus:
+ numa_bitmask_free(cpus);
+ return nmp;
}
__asm__(".symver numa_get_run_node_mask_v1,numa_get_run_node_mask@libnuma_1.1");
@@ -1442,13 +1450,15 @@ numa_get_run_node_mask_v2(void)
int max = numa_max_node_int();
struct bitmask *bmp, *cpus, *nodecpus;
+
bmp = numa_allocate_cpumask();
cpus = numa_allocate_cpumask();
- nodecpus = numa_allocate_cpumask();
-
- if (numa_sched_getaffinity_v2_int(0, cpus) < 0)
- return numa_no_nodes_ptr;
+ if (numa_sched_getaffinity_v2_int(0, cpus) < 0){
+ copy_bitmask_to_bitmask(numa_no_nodes_ptr, bmp);
+ goto free_cpus;
+ }
+ nodecpus = numa_allocate_cpumask();
for (i = 0; i <= max; i++) {
if (numa_node_to_cpus_v2_int(i, nodecpus) < 0) {
/* It's possible for the node to not exist */
@@ -1459,6 +1469,9 @@ numa_get_run_node_mask_v2(void)
numa_bitmask_setbit(bmp, i);
}
}
+ numa_bitmask_free(nodecpus);
+free_cpus:
+ numa_bitmask_free(cpus);
return bmp;
}
__asm__(".symver numa_get_run_node_mask_v2,numa_get_run_node_mask@@libnuma_1.2");
@@ -1481,22 +1494,28 @@ int numa_move_pages(int pid, unsigned lo
int numa_run_on_node(int node)
{
int numa_num_nodes = numa_num_possible_nodes();
+ int ret = -1;
struct bitmask *cpus;
+ if (node >= numa_num_nodes){
+ errno = EINVAL;
+ goto out;
+ }
+
cpus = numa_allocate_cpumask();
- if (node == -1) {
+
+ if (node == -1)
numa_bitmask_setall(cpus);
- } else if (node < numa_num_nodes) {
- if (numa_node_to_cpus_v2_int(node, cpus) < 0) {
- numa_warn(W_noderunmask,
- "Cannot read node cpumask from sysfs");
- return -1;
- }
- } else {
- errno = EINVAL;
- return -1;
+ else if (numa_node_to_cpus_v2_int(node, cpus) < 0){
+ numa_warn(W_noderunmask, "Cannot read node cpumask from sysfs");
+ goto free;
}
- return numa_sched_setaffinity_v2_int(0, cpus);
+
+ ret = numa_sched_setaffinity_v2_int(0, cpus);
+free:
+ numa_bitmask_free(cpus);
+out:
+ return ret;
}
int numa_preferred(void)
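Caller-side note, illustrated with a small standalone sketch (again, not
part of the patch): with the v2 interface, numa_get_run_node_mask() hands
back a struct bitmask * that the caller owns and releases with
numa_bitmask_free(); the hunks above make sure the temporary cpumasks used
to build that result no longer leak inside the library, and patch 4/8
("return freeable bitmasks") covers the returned masks themselves.

        #include <stdio.h>
        #include <numa.h>

        int main(void)
        {
                struct bitmask *nodes;
                int i;

                if (numa_available() < 0)
                        return 1;

                /* v2 interface: result is a heap-allocated bitmask */
                nodes = numa_get_run_node_mask();

                for (i = 0; i <= numa_max_node(); i++)
                        if (numa_bitmask_isbitset(nodes, i))
                                printf("may run on node %d\n", i);

                numa_bitmask_free(nodes);       /* caller frees the result */
                return 0;
        }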