From: Andreas Herrmann <andreas.herrmann3@amd.com>
To: Ingo Molnar <mingo@elte.hu>, Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH 11/12] sched: Separate out build of NUMA sched groups from __build_sched_domains
Date: Tue, 18 Aug 2009 13:01:11 +0200 [thread overview]
Message-ID: <20090818110111.GL29515@alberich.amd.com> (raw)
In-Reply-To: <20090818104944.GA29515@alberich.amd.com>
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 130 +++++++++++++++++++++++++++++---------------------------
1 files changed, 67 insertions(+), 63 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8f53701..1933641 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8352,6 +8352,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
sg = sg->next;
} while (sg != group_head);
}
+
+static int build_numa_sched_groups(struct s_data *d,
+ const struct cpumask *cpu_map, int num)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg, *prev;
+ int n, j;
+
+ cpumask_clear(d->covered);
+ cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+ if (cpumask_empty(d->nodemask)) {
+ d->sched_group_nodes[num] = NULL;
+ goto out;
+ }
+
+ sched_domain_node_span(num, d->domainspan);
+ cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+ num);
+ return -ENOMEM;
+ }
+ d->sched_group_nodes[num] = sg;
+
+ for_each_cpu(j, d->nodemask) {
+ sd = &per_cpu(node_domains, j).sd;
+ sd->groups = sg;
+ }
+
+ sg->__cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->nodemask);
+ sg->next = sg;
+ cpumask_or(d->covered, d->covered, d->nodemask);
+
+ prev = sg;
+ for (j = 0; j < nr_node_ids; j++) {
+ n = (num + j) % nr_node_ids;
+ cpumask_complement(d->notcovered, d->covered);
+ cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+ cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+ if (cpumask_empty(d->tmpmask))
+ break;
+ cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+ if (cpumask_empty(d->tmpmask))
+ continue;
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING
+ "Can not alloc domain group for node %d\n", j);
+ return -ENOMEM;
+ }
+ sg->__cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+ sg->next = prev->next;
+ cpumask_or(d->covered, d->covered, d->tmpmask);
+ prev->next = sg;
+ prev = sg;
+ }
+out:
+ return 0;
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_NUMA
@@ -8758,70 +8823,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
if (d.sd_allnodes)
build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
- for (i = 0; i < nr_node_ids; i++) {
- /* Set up node groups */
- struct sched_group *sg, *prev;
- int j;
-
- cpumask_clear(d.covered);
- cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(d.nodemask)) {
- d.sched_group_nodes[i] = NULL;
- continue;
- }
-
- sched_domain_node_span(i, d.domainspan);
- cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
- sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING "Can not alloc domain group for "
- "node %d\n", i);
+ for (i = 0; i < nr_node_ids; i++)
+ if (build_numa_sched_groups(&d, cpu_map, i))
goto error;
- }
- d.sched_group_nodes[i] = sg;
- for_each_cpu(j, d.nodemask) {
- struct sched_domain *sd;
-
- sd = &per_cpu(node_domains, j).sd;
- sd->groups = sg;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), d.nodemask);
- sg->next = sg;
- cpumask_or(d.covered, d.covered, d.nodemask);
- prev = sg;
-
- for (j = 0; j < nr_node_ids; j++) {
- int n = (i + j) % nr_node_ids;
-
- cpumask_complement(d.notcovered, d.covered);
- cpumask_and(d.tmpmask, d.notcovered, cpu_map);
- cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
- if (cpumask_empty(d.tmpmask))
- break;
-
- cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
- if (cpumask_empty(d.tmpmask))
- continue;
-
- sg = kmalloc_node(sizeof(struct sched_group) +
- cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", j);
- goto error;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), d.tmpmask);
- sg->next = prev->next;
- cpumask_or(d.covered, d.covered, d.tmpmask);
- prev->next = sg;
- prev = sg;
- }
- }
#endif
/* Calculate CPU power for physical packages and nodes */
--
1.6.4
next prev parent reply other threads:[~2009-08-18 11:01 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
2009-08-18 10:51 ` [PATCH 01/12] sched: Use structure to store local data in __build_sched_domains Andreas Herrmann
2009-08-18 16:51 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 10:53 ` [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains Andreas Herrmann
2009-08-18 12:57 ` Peter Zijlstra
2009-08-18 13:35 ` Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 10:54 ` [PATCH 03/12] sched: Seperate out build of NUMA sched domain " Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:54 ` [PATCH 04/12] sched: Seperate out build of CPU " Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:56 ` [PATCH 05/12] sched: Seperate out build of MC " Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:57 ` [PATCH 06/12] sched: Seperate out build of SMT " Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:57 ` [PATCH 07/12] sched: Seperate out build of SMT sched groups " Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:58 ` [PATCH 08/12] sched: Seperate out build of MC " Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:59 ` [PATCH 09/12] sched: Seperate out build of CPU " Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:00 ` [PATCH 10/12] sched: Seperate out build of ALLNODES " Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:01 ` Andreas Herrmann [this message]
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate out build of NUMA " tip-bot for Andreas Herrmann
2009-08-18 11:02 ` [PATCH 12/12] sched: Consolidate definition of variable sd in __build_sched_domains Andreas Herrmann
2009-08-18 16:54 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 11:16 ` [PATCH 0/12] cleanup __build_sched_domains() Ingo Molnar
2009-08-18 13:15 ` Andreas Herrmann
2009-08-18 13:25 ` Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20090818110111.GL29515@alberich.amd.com \
--to=andreas.herrmann3@amd.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@elte.hu \
--cc=peterz@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox