* [PATCH 01/12] sched: Use structure to store local data in __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
@ 2009-08-18 10:51 ` Andreas Herrmann
2009-08-18 16:51 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 10:53 ` [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains Andreas Herrmann
` (11 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:51 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 165 ++++++++++++++++++++++++++++++--------------------------
1 files changed, 89 insertions(+), 76 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5184580..30fc914 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8197,6 +8197,22 @@ struct static_sched_domain {
DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};
+struct s_data {
+#ifdef CONFIG_NUMA
+ int sd_allnodes;
+ cpumask_var_t domainspan;
+ cpumask_var_t covered;
+ cpumask_var_t notcovered;
+#endif
+ cpumask_var_t nodemask;
+ cpumask_var_t this_sibling_map;
+ cpumask_var_t this_core_map;
+ cpumask_var_t send_covered;
+ cpumask_var_t tmpmask;
+ struct sched_group **sched_group_nodes;
+ struct root_domain *rd;
+};
+
/*
* SMT sched-domains:
*/
@@ -8491,54 +8507,49 @@ static void set_domain_attribute(struct sched_domain *sd,
static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
+ struct s_data d;
int i, err = -ENOMEM;
- struct root_domain *rd;
- cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
- tmpmask;
#ifdef CONFIG_NUMA
- cpumask_var_t domainspan, covered, notcovered;
- struct sched_group **sched_group_nodes = NULL;
- int sd_allnodes = 0;
-
- if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
+ d.sd_allnodes = 0;
+ if (!alloc_cpumask_var(&d.domainspan, GFP_KERNEL))
goto out;
- if (!alloc_cpumask_var(&covered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.covered, GFP_KERNEL))
goto free_domainspan;
- if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.notcovered, GFP_KERNEL))
goto free_covered;
#endif
- if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.nodemask, GFP_KERNEL))
goto free_notcovered;
- if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.this_sibling_map, GFP_KERNEL))
goto free_nodemask;
- if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.this_core_map, GFP_KERNEL))
goto free_this_sibling_map;
- if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.send_covered, GFP_KERNEL))
goto free_this_core_map;
- if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.tmpmask, GFP_KERNEL))
goto free_send_covered;
#ifdef CONFIG_NUMA
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
- GFP_KERNEL);
- if (!sched_group_nodes) {
+ d.sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
+ GFP_KERNEL);
+ if (!d.sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
goto free_tmpmask;
}
#endif
- rd = alloc_rootdomain();
- if (!rd) {
+ d.rd = alloc_rootdomain();
+ if (!d.rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
goto free_sched_groups;
}
#ifdef CONFIG_NUMA
- sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
+ sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d.sched_group_nodes;
#endif
/*
@@ -8547,18 +8558,20 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
struct sched_domain *sd = NULL, *p;
- cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
+ cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
+ cpu_map);
#ifdef CONFIG_NUMA
if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
+ SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
sd = &per_cpu(allnodes_domains, i).sd;
SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), cpu_map);
- cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups,
+ d.tmpmask);
p = sd;
- sd_allnodes = 1;
+ d.sd_allnodes = 1;
} else
p = NULL;
@@ -8577,11 +8590,11 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = &per_cpu(phys_domains, i).sd;
SD_INIT(sd, CPU);
set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), nodemask);
+ cpumask_copy(sched_domain_span(sd), d.nodemask);
sd->parent = p;
if (p)
p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_phys_group(i, cpu_map, &sd->groups, d.tmpmask);
#ifdef CONFIG_SCHED_MC
p = sd;
@@ -8592,7 +8605,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpu_coregroup_mask(i));
sd->parent = p;
p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_core_group(i, cpu_map, &sd->groups, d.tmpmask);
#endif
#ifdef CONFIG_SCHED_SMT
@@ -8604,54 +8617,54 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
topology_thread_cpumask(i), cpu_map);
sd->parent = p;
p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, d.tmpmask);
#endif
}
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu(i, cpu_map) {
- cpumask_and(this_sibling_map,
+ cpumask_and(d.this_sibling_map,
topology_thread_cpumask(i), cpu_map);
- if (i != cpumask_first(this_sibling_map))
+ if (i != cpumask_first(d.this_sibling_map))
continue;
- init_sched_build_groups(this_sibling_map, cpu_map,
+ init_sched_build_groups(d.this_sibling_map, cpu_map,
&cpu_to_cpu_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
#endif
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
for_each_cpu(i, cpu_map) {
- cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
- if (i != cpumask_first(this_core_map))
+ cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
+ if (i != cpumask_first(d.this_core_map))
continue;
- init_sched_build_groups(this_core_map, cpu_map,
+ init_sched_build_groups(d.this_core_map, cpu_map,
&cpu_to_core_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
#endif
/* Set up physical groups */
for (i = 0; i < nr_node_ids; i++) {
- cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(nodemask))
+ cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
+ if (cpumask_empty(d.nodemask))
continue;
- init_sched_build_groups(nodemask, cpu_map,
+ init_sched_build_groups(d.nodemask, cpu_map,
&cpu_to_phys_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
#ifdef CONFIG_NUMA
/* Set up node groups */
- if (sd_allnodes) {
+ if (d.sd_allnodes) {
init_sched_build_groups(cpu_map, cpu_map,
&cpu_to_allnodes_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
for (i = 0; i < nr_node_ids; i++) {
@@ -8659,15 +8672,15 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_group *sg, *prev;
int j;
- cpumask_clear(covered);
- cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(nodemask)) {
- sched_group_nodes[i] = NULL;
+ cpumask_clear(d.covered);
+ cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
+ if (cpumask_empty(d.nodemask)) {
+ d.sched_group_nodes[i] = NULL;
continue;
}
- sched_domain_node_span(i, domainspan);
- cpumask_and(domainspan, domainspan, cpu_map);
+ sched_domain_node_span(i, d.domainspan);
+ cpumask_and(d.domainspan, d.domainspan, cpu_map);
sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, i);
@@ -8676,30 +8689,30 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
"node %d\n", i);
goto error;
}
- sched_group_nodes[i] = sg;
- for_each_cpu(j, nodemask) {
+ d.sched_group_nodes[i] = sg;
+ for_each_cpu(j, d.nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j).sd;
sd->groups = sg;
}
sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), nodemask);
+ cpumask_copy(sched_group_cpus(sg), d.nodemask);
sg->next = sg;
- cpumask_or(covered, covered, nodemask);
+ cpumask_or(d.covered, d.covered, d.nodemask);
prev = sg;
for (j = 0; j < nr_node_ids; j++) {
int n = (i + j) % nr_node_ids;
- cpumask_complement(notcovered, covered);
- cpumask_and(tmpmask, notcovered, cpu_map);
- cpumask_and(tmpmask, tmpmask, domainspan);
- if (cpumask_empty(tmpmask))
+ cpumask_complement(d.notcovered, d.covered);
+ cpumask_and(d.tmpmask, d.notcovered, cpu_map);
+ cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
+ if (cpumask_empty(d.tmpmask))
break;
- cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
- if (cpumask_empty(tmpmask))
+ cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
+ if (cpumask_empty(d.tmpmask))
continue;
sg = kmalloc_node(sizeof(struct sched_group) +
@@ -8711,9 +8724,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
goto error;
}
sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), tmpmask);
+ cpumask_copy(sched_group_cpus(sg), d.tmpmask);
sg->next = prev->next;
- cpumask_or(covered, covered, tmpmask);
+ cpumask_or(d.covered, d.covered, d.tmpmask);
prev->next = sg;
prev = sg;
}
@@ -8744,13 +8757,13 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA
for (i = 0; i < nr_node_ids; i++)
- init_numa_sched_groups_power(sched_group_nodes[i]);
+ init_numa_sched_groups_power(d.sched_group_nodes[i]);
- if (sd_allnodes) {
+ if (d.sd_allnodes) {
struct sched_group *sg;
cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
- tmpmask);
+ d.tmpmask);
init_numa_sched_groups_power(sg);
}
#endif
@@ -8765,42 +8778,42 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#else
sd = &per_cpu(phys_domains, i).sd;
#endif
- cpu_attach_domain(sd, rd, i);
+ cpu_attach_domain(sd, d.rd, i);
}
err = 0;
free_tmpmask:
- free_cpumask_var(tmpmask);
+ free_cpumask_var(d.tmpmask);
free_send_covered:
- free_cpumask_var(send_covered);
+ free_cpumask_var(d.send_covered);
free_this_core_map:
- free_cpumask_var(this_core_map);
+ free_cpumask_var(d.this_core_map);
free_this_sibling_map:
- free_cpumask_var(this_sibling_map);
+ free_cpumask_var(d.this_sibling_map);
free_nodemask:
- free_cpumask_var(nodemask);
+ free_cpumask_var(d.nodemask);
free_notcovered:
#ifdef CONFIG_NUMA
- free_cpumask_var(notcovered);
+ free_cpumask_var(d.notcovered);
free_covered:
- free_cpumask_var(covered);
+ free_cpumask_var(d.covered);
free_domainspan:
- free_cpumask_var(domainspan);
+ free_cpumask_var(d.domainspan);
out:
#endif
return err;
free_sched_groups:
#ifdef CONFIG_NUMA
- kfree(sched_group_nodes);
+ kfree(d.sched_group_nodes);
#endif
goto free_tmpmask;
#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map, tmpmask);
- free_rootdomain(rd);
+ free_sched_groups(cpu_map, d.tmpmask);
+ free_rootdomain(d.rd);
goto free_tmpmask;
#endif
}
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Use structure to store local data in __build_sched_domains
2009-08-18 10:51 ` [PATCH 01/12] sched: Use structure to store local data in __build_sched_domains Andreas Herrmann
@ 2009-08-18 16:51 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:51 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 49a02c514d967921a908ac64e9c0ec0f0fc17fd8
Gitweb: http://git.kernel.org/tip/49a02c514d967921a908ac64e9c0ec0f0fc17fd8
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:51:52 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:39 +0200
sched: Use structure to store local data in __build_sched_domains
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105152.GB29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 165 ++++++++++++++++++++++++++++++--------------------------
1 files changed, 89 insertions(+), 76 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e26..565ff77 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8091,6 +8091,22 @@ struct static_sched_domain {
DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};
+struct s_data {
+#ifdef CONFIG_NUMA
+ int sd_allnodes;
+ cpumask_var_t domainspan;
+ cpumask_var_t covered;
+ cpumask_var_t notcovered;
+#endif
+ cpumask_var_t nodemask;
+ cpumask_var_t this_sibling_map;
+ cpumask_var_t this_core_map;
+ cpumask_var_t send_covered;
+ cpumask_var_t tmpmask;
+ struct sched_group **sched_group_nodes;
+ struct root_domain *rd;
+};
+
/*
* SMT sched-domains:
*/
@@ -8385,54 +8401,49 @@ static void set_domain_attribute(struct sched_domain *sd,
static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
+ struct s_data d;
int i, err = -ENOMEM;
- struct root_domain *rd;
- cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
- tmpmask;
#ifdef CONFIG_NUMA
- cpumask_var_t domainspan, covered, notcovered;
- struct sched_group **sched_group_nodes = NULL;
- int sd_allnodes = 0;
-
- if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
+ d.sd_allnodes = 0;
+ if (!alloc_cpumask_var(&d.domainspan, GFP_KERNEL))
goto out;
- if (!alloc_cpumask_var(&covered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.covered, GFP_KERNEL))
goto free_domainspan;
- if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.notcovered, GFP_KERNEL))
goto free_covered;
#endif
- if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.nodemask, GFP_KERNEL))
goto free_notcovered;
- if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.this_sibling_map, GFP_KERNEL))
goto free_nodemask;
- if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.this_core_map, GFP_KERNEL))
goto free_this_sibling_map;
- if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.send_covered, GFP_KERNEL))
goto free_this_core_map;
- if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&d.tmpmask, GFP_KERNEL))
goto free_send_covered;
#ifdef CONFIG_NUMA
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
- GFP_KERNEL);
- if (!sched_group_nodes) {
+ d.sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
+ GFP_KERNEL);
+ if (!d.sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
goto free_tmpmask;
}
#endif
- rd = alloc_rootdomain();
- if (!rd) {
+ d.rd = alloc_rootdomain();
+ if (!d.rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
goto free_sched_groups;
}
#ifdef CONFIG_NUMA
- sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
+ sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d.sched_group_nodes;
#endif
/*
@@ -8441,18 +8452,20 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
struct sched_domain *sd = NULL, *p;
- cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
+ cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
+ cpu_map);
#ifdef CONFIG_NUMA
if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
+ SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
sd = &per_cpu(allnodes_domains, i).sd;
SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), cpu_map);
- cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups,
+ d.tmpmask);
p = sd;
- sd_allnodes = 1;
+ d.sd_allnodes = 1;
} else
p = NULL;
@@ -8471,11 +8484,11 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = &per_cpu(phys_domains, i).sd;
SD_INIT(sd, CPU);
set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), nodemask);
+ cpumask_copy(sched_domain_span(sd), d.nodemask);
sd->parent = p;
if (p)
p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_phys_group(i, cpu_map, &sd->groups, d.tmpmask);
#ifdef CONFIG_SCHED_MC
p = sd;
@@ -8486,7 +8499,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpu_coregroup_mask(i));
sd->parent = p;
p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_core_group(i, cpu_map, &sd->groups, d.tmpmask);
#endif
#ifdef CONFIG_SCHED_SMT
@@ -8498,54 +8511,54 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
topology_thread_cpumask(i), cpu_map);
sd->parent = p;
p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, d.tmpmask);
#endif
}
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu(i, cpu_map) {
- cpumask_and(this_sibling_map,
+ cpumask_and(d.this_sibling_map,
topology_thread_cpumask(i), cpu_map);
- if (i != cpumask_first(this_sibling_map))
+ if (i != cpumask_first(d.this_sibling_map))
continue;
- init_sched_build_groups(this_sibling_map, cpu_map,
+ init_sched_build_groups(d.this_sibling_map, cpu_map,
&cpu_to_cpu_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
#endif
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
for_each_cpu(i, cpu_map) {
- cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
- if (i != cpumask_first(this_core_map))
+ cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
+ if (i != cpumask_first(d.this_core_map))
continue;
- init_sched_build_groups(this_core_map, cpu_map,
+ init_sched_build_groups(d.this_core_map, cpu_map,
&cpu_to_core_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
#endif
/* Set up physical groups */
for (i = 0; i < nr_node_ids; i++) {
- cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(nodemask))
+ cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
+ if (cpumask_empty(d.nodemask))
continue;
- init_sched_build_groups(nodemask, cpu_map,
+ init_sched_build_groups(d.nodemask, cpu_map,
&cpu_to_phys_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
#ifdef CONFIG_NUMA
/* Set up node groups */
- if (sd_allnodes) {
+ if (d.sd_allnodes) {
init_sched_build_groups(cpu_map, cpu_map,
&cpu_to_allnodes_group,
- send_covered, tmpmask);
+ d.send_covered, d.tmpmask);
}
for (i = 0; i < nr_node_ids; i++) {
@@ -8553,15 +8566,15 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_group *sg, *prev;
int j;
- cpumask_clear(covered);
- cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(nodemask)) {
- sched_group_nodes[i] = NULL;
+ cpumask_clear(d.covered);
+ cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
+ if (cpumask_empty(d.nodemask)) {
+ d.sched_group_nodes[i] = NULL;
continue;
}
- sched_domain_node_span(i, domainspan);
- cpumask_and(domainspan, domainspan, cpu_map);
+ sched_domain_node_span(i, d.domainspan);
+ cpumask_and(d.domainspan, d.domainspan, cpu_map);
sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, i);
@@ -8570,30 +8583,30 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
"node %d\n", i);
goto error;
}
- sched_group_nodes[i] = sg;
- for_each_cpu(j, nodemask) {
+ d.sched_group_nodes[i] = sg;
+ for_each_cpu(j, d.nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j).sd;
sd->groups = sg;
}
sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), nodemask);
+ cpumask_copy(sched_group_cpus(sg), d.nodemask);
sg->next = sg;
- cpumask_or(covered, covered, nodemask);
+ cpumask_or(d.covered, d.covered, d.nodemask);
prev = sg;
for (j = 0; j < nr_node_ids; j++) {
int n = (i + j) % nr_node_ids;
- cpumask_complement(notcovered, covered);
- cpumask_and(tmpmask, notcovered, cpu_map);
- cpumask_and(tmpmask, tmpmask, domainspan);
- if (cpumask_empty(tmpmask))
+ cpumask_complement(d.notcovered, d.covered);
+ cpumask_and(d.tmpmask, d.notcovered, cpu_map);
+ cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
+ if (cpumask_empty(d.tmpmask))
break;
- cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
- if (cpumask_empty(tmpmask))
+ cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
+ if (cpumask_empty(d.tmpmask))
continue;
sg = kmalloc_node(sizeof(struct sched_group) +
@@ -8605,9 +8618,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
goto error;
}
sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), tmpmask);
+ cpumask_copy(sched_group_cpus(sg), d.tmpmask);
sg->next = prev->next;
- cpumask_or(covered, covered, tmpmask);
+ cpumask_or(d.covered, d.covered, d.tmpmask);
prev->next = sg;
prev = sg;
}
@@ -8638,13 +8651,13 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA
for (i = 0; i < nr_node_ids; i++)
- init_numa_sched_groups_power(sched_group_nodes[i]);
+ init_numa_sched_groups_power(d.sched_group_nodes[i]);
- if (sd_allnodes) {
+ if (d.sd_allnodes) {
struct sched_group *sg;
cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
- tmpmask);
+ d.tmpmask);
init_numa_sched_groups_power(sg);
}
#endif
@@ -8659,42 +8672,42 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#else
sd = &per_cpu(phys_domains, i).sd;
#endif
- cpu_attach_domain(sd, rd, i);
+ cpu_attach_domain(sd, d.rd, i);
}
err = 0;
free_tmpmask:
- free_cpumask_var(tmpmask);
+ free_cpumask_var(d.tmpmask);
free_send_covered:
- free_cpumask_var(send_covered);
+ free_cpumask_var(d.send_covered);
free_this_core_map:
- free_cpumask_var(this_core_map);
+ free_cpumask_var(d.this_core_map);
free_this_sibling_map:
- free_cpumask_var(this_sibling_map);
+ free_cpumask_var(d.this_sibling_map);
free_nodemask:
- free_cpumask_var(nodemask);
+ free_cpumask_var(d.nodemask);
free_notcovered:
#ifdef CONFIG_NUMA
- free_cpumask_var(notcovered);
+ free_cpumask_var(d.notcovered);
free_covered:
- free_cpumask_var(covered);
+ free_cpumask_var(d.covered);
free_domainspan:
- free_cpumask_var(domainspan);
+ free_cpumask_var(d.domainspan);
out:
#endif
return err;
free_sched_groups:
#ifdef CONFIG_NUMA
- kfree(sched_group_nodes);
+ kfree(d.sched_group_nodes);
#endif
goto free_tmpmask;
#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map, tmpmask);
- free_rootdomain(rd);
+ free_sched_groups(cpu_map, d.tmpmask);
+ free_rootdomain(d.rd);
goto free_tmpmask;
#endif
}
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
2009-08-18 10:51 ` [PATCH 01/12] sched: Use structure to store local data in __build_sched_domains Andreas Herrmann
@ 2009-08-18 10:53 ` Andreas Herrmann
2009-08-18 12:57 ` Peter Zijlstra
2009-08-18 16:52 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 10:54 ` [PATCH 03/12] sched: Seperate out build of NUMA sched domain " Andreas Herrmann
` (10 subsequent siblings)
12 siblings, 2 replies; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:53 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 171 ++++++++++++++++++++++++++++++++-----------------------
1 files changed, 99 insertions(+), 72 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 30fc914..5aa7dad 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8213,6 +8213,23 @@ struct s_data {
struct root_domain *rd;
};
+enum s_alloc {
+ sa_sched_groups = 0,
+ sa_rootdomain,
+ sa_tmpmask,
+ sa_send_covered,
+ sa_this_core_map,
+ sa_this_sibling_map,
+ sa_nodemask,
+ sa_sched_group_nodes,
+#ifdef CONFIG_NUMA
+ sa_notcovered,
+ sa_covered,
+ sa_domainspan,
+#endif
+ sa_none,
+};
+
/*
* SMT sched-domains:
*/
@@ -8500,6 +8517,77 @@ static void set_domain_attribute(struct sched_domain *sd,
}
}
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+ const struct cpumask *cpu_map)
+{
+ switch (what) {
+ case sa_sched_groups:
+ free_sched_groups(cpu_map, d->tmpmask); /* fall through */
+ d->sched_group_nodes = NULL;
+ case sa_rootdomain:
+ free_rootdomain(d->rd); /* fall through */
+ case sa_tmpmask:
+ free_cpumask_var(d->tmpmask); /* fall through */
+ case sa_send_covered:
+ free_cpumask_var(d->send_covered); /* fall through */
+ case sa_this_core_map:
+ free_cpumask_var(d->this_core_map); /* fall through */
+ case sa_this_sibling_map:
+ free_cpumask_var(d->this_sibling_map); /* fall through */
+ case sa_nodemask:
+ free_cpumask_var(d->nodemask); /* fall through */
+ case sa_sched_group_nodes:
+#ifdef CONFIG_NUMA
+ kfree(d->sched_group_nodes); /* fall through */
+ case sa_notcovered:
+ free_cpumask_var(d->notcovered); /* fall through */
+ case sa_covered:
+ free_cpumask_var(d->covered); /* fall through */
+ case sa_domainspan:
+ free_cpumask_var(d->domainspan); /* fall through */
+#endif
+ case sa_none:
+ break;
+ }
+}
+
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+ const struct cpumask *cpu_map)
+{
+#ifdef CONFIG_NUMA
+ if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
+ return sa_none;
+ if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
+ return sa_domainspan;
+ if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
+ return sa_covered;
+ /* Allocate the per-node list of sched groups */
+ d->sched_group_nodes = kcalloc(nr_node_ids,
+ sizeof(struct sched_group *), GFP_KERNEL);
+ if (!d->sched_group_nodes) {
+ printk(KERN_WARNING "Can not alloc sched group node list\n");
+ return sa_notcovered;
+ }
+ sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
+#endif
+ if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
+ return sa_sched_group_nodes;
+ if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
+ return sa_nodemask;
+ if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
+ return sa_this_sibling_map;
+ if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+ return sa_this_core_map;
+ if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
+ return sa_send_covered;
+ d->rd = alloc_rootdomain();
+ if (!d->rd) {
+ printk(KERN_WARNING "Cannot alloc root domain\n");
+ return sa_tmpmask;
+ }
+ return sa_rootdomain;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8507,50 +8595,17 @@ static void set_domain_attribute(struct sched_domain *sd,
static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
+ enum s_alloc alloc_state = sa_none;
struct s_data d;
- int i, err = -ENOMEM;
+ int i;
#ifdef CONFIG_NUMA
d.sd_allnodes = 0;
- if (!alloc_cpumask_var(&d.domainspan, GFP_KERNEL))
- goto out;
- if (!alloc_cpumask_var(&d.covered, GFP_KERNEL))
- goto free_domainspan;
- if (!alloc_cpumask_var(&d.notcovered, GFP_KERNEL))
- goto free_covered;
-#endif
-
- if (!alloc_cpumask_var(&d.nodemask, GFP_KERNEL))
- goto free_notcovered;
- if (!alloc_cpumask_var(&d.this_sibling_map, GFP_KERNEL))
- goto free_nodemask;
- if (!alloc_cpumask_var(&d.this_core_map, GFP_KERNEL))
- goto free_this_sibling_map;
- if (!alloc_cpumask_var(&d.send_covered, GFP_KERNEL))
- goto free_this_core_map;
- if (!alloc_cpumask_var(&d.tmpmask, GFP_KERNEL))
- goto free_send_covered;
-
-#ifdef CONFIG_NUMA
- /*
- * Allocate the per-node list of sched groups
- */
- d.sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
- GFP_KERNEL);
- if (!d.sched_group_nodes) {
- printk(KERN_WARNING "Can not alloc sched group node list\n");
- goto free_tmpmask;
- }
#endif
- d.rd = alloc_rootdomain();
- if (!d.rd) {
- printk(KERN_WARNING "Cannot alloc root domain\n");
- goto free_sched_groups;
- }
-
-#ifdef CONFIG_NUMA
- sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d.sched_group_nodes;
-#endif
+ alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+ if (alloc_state != sa_rootdomain)
+ goto error;
+ alloc_state = sa_sched_groups;
/*
* Set up domains for cpus specified by the cpu_map.
@@ -8781,41 +8836,13 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpu_attach_domain(sd, d.rd, i);
}
- err = 0;
-
-free_tmpmask:
- free_cpumask_var(d.tmpmask);
-free_send_covered:
- free_cpumask_var(d.send_covered);
-free_this_core_map:
- free_cpumask_var(d.this_core_map);
-free_this_sibling_map:
- free_cpumask_var(d.this_sibling_map);
-free_nodemask:
- free_cpumask_var(d.nodemask);
-free_notcovered:
-#ifdef CONFIG_NUMA
- free_cpumask_var(d.notcovered);
-free_covered:
- free_cpumask_var(d.covered);
-free_domainspan:
- free_cpumask_var(d.domainspan);
-out:
-#endif
- return err;
-
-free_sched_groups:
-#ifdef CONFIG_NUMA
- kfree(d.sched_group_nodes);
-#endif
- goto free_tmpmask;
+ d.sched_group_nodes = NULL; /* don't free this we still need it */
+ __free_domain_allocs(&d, sa_tmpmask, cpu_map);
+ return 0;
-#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map, d.tmpmask);
- free_rootdomain(d.rd);
- goto free_tmpmask;
-#endif
+ __free_domain_allocs(&d, alloc_state, cpu_map);
+ return -ENOMEM;
}
static int build_sched_domains(const struct cpumask *cpu_map)
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* Re: [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains
2009-08-18 10:53 ` [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains Andreas Herrmann
@ 2009-08-18 12:57 ` Peter Zijlstra
2009-08-18 13:35 ` Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
1 sibling, 1 reply; 30+ messages in thread
From: Peter Zijlstra @ 2009-08-18 12:57 UTC (permalink / raw)
To: Andreas Herrmann; +Cc: Ingo Molnar, linux-kernel
On Tue, 2009-08-18 at 12:53 +0200, Andreas Herrmann wrote:
> @@ -8213,6 +8213,23 @@ struct s_data {
> struct root_domain *rd;
> };
>
> +enum s_alloc {
> + sa_sched_groups = 0,
> + sa_rootdomain,
> + sa_tmpmask,
> + sa_send_covered,
> + sa_this_core_map,
> + sa_this_sibling_map,
> + sa_nodemask,
> + sa_sched_group_nodes,
> +#ifdef CONFIG_NUMA
> + sa_notcovered,
> + sa_covered,
> + sa_domainspan,
> +#endif
> + sa_none,
> +};
> +
> /*
> * SMT sched-domains:
> */
> @@ -8500,6 +8517,77 @@ static void set_domain_attribute(struct sched_domain *sd,
> }
> }
>
> +static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
> + const struct cpumask *cpu_map)
> +{
> + switch (what) {
> + case sa_sched_groups:
> + free_sched_groups(cpu_map, d->tmpmask); /* fall through */
> + d->sched_group_nodes = NULL;
> + case sa_rootdomain:
> + free_rootdomain(d->rd); /* fall through */
> + case sa_tmpmask:
> + free_cpumask_var(d->tmpmask); /* fall through */
> + case sa_send_covered:
> + free_cpumask_var(d->send_covered); /* fall through */
> + case sa_this_core_map:
> + free_cpumask_var(d->this_core_map); /* fall through */
> + case sa_this_sibling_map:
> + free_cpumask_var(d->this_sibling_map); /* fall through */
> + case sa_nodemask:
> + free_cpumask_var(d->nodemask); /* fall through */
> + case sa_sched_group_nodes:
> +#ifdef CONFIG_NUMA
> + kfree(d->sched_group_nodes); /* fall through */
> + case sa_notcovered:
> + free_cpumask_var(d->notcovered); /* fall through */
> + case sa_covered:
> + free_cpumask_var(d->covered); /* fall through */
> + case sa_domainspan:
> + free_cpumask_var(d->domainspan); /* fall through */
> +#endif
> + case sa_none:
> + break;
> + }
> +}
> +
> +static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
> + const struct cpumask *cpu_map)
> +{
> +#ifdef CONFIG_NUMA
> + if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
> + return sa_none;
> + if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
> + return sa_domainspan;
> + if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
> + return sa_covered;
> + /* Allocate the per-node list of sched groups */
> + d->sched_group_nodes = kcalloc(nr_node_ids,
> + sizeof(struct sched_group *), GFP_KERNEL);
> + if (!d->sched_group_nodes) {
> + printk(KERN_WARNING "Can not alloc sched group node list\n");
> + return sa_notcovered;
> + }
> + sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
> +#endif
> + if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
> + return sa_sched_group_nodes;
> + if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
> + return sa_nodemask;
> + if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
> + return sa_this_sibling_map;
> + if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
> + return sa_this_core_map;
> + if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
> + return sa_send_covered;
> + d->rd = alloc_rootdomain();
> + if (!d->rd) {
> + printk(KERN_WARNING "Cannot alloc root domain\n");
> + return sa_tmpmask;
> + }
> + return sa_rootdomain;
> +}
Code like this makes me wonder if the decomposition you chose is the
right one.
I'd much rather see something that keeps the various domain levels fully
isolated. That is, the numa code should not need to know anything about
the multi-core code etc.
The above we still have everything mixed in one.
Maybe something along the lines of (skipping lots of fun detail):
struct domain_constructor {
struct sched_domain *func(const struct cpumask *cpu_map,
struct sched_domain_attr *attr);
};
struct domain_constructor domain_constructors[] = {
{ &construct_numa_domain },
{ &construct_mc_domain },
{ &construct_cpu_domain },
{ &construct_smt_domain },
};
static int construct_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
int i;
struct sched_domain *top = NULL, *parent = NULL, *sd;
for (i = 0; i < ARRAY_SIZE(domain_constructors); i++) {
sd = domain_constructors[i].func(cpu_map, attr);
if (!sd)
continue;
if (IS_ERR(sd)) {
ret = PTR_ERR(sd);
goto fail;
}
if (!top)
top = sd;
if (degenerate_domain(parent, sd)) {
fold_domain(parent, sd);
sd->destroy();
continue;
}
sd->parent = parent;
parent = sd;
}
ret = attach_domain(sd);
if (ret)
goto fail;
out:
return ret;
fail:
for (sd = parent; sd; sd = parent) {
parent = sd->parent;
sd->destroy();
}
goto out;
}
^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains
2009-08-18 12:57 ` Peter Zijlstra
@ 2009-08-18 13:35 ` Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 13:35 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: Ingo Molnar, linux-kernel
On Tue, Aug 18, 2009 at 02:57:10PM +0200, Peter Zijlstra wrote:
> On Tue, 2009-08-18 at 12:53 +0200, Andreas Herrmann wrote:
> > @@ -8213,6 +8213,23 @@ struct s_data {
> > struct root_domain *rd;
> > };
> >
> > +enum s_alloc {
> > + sa_sched_groups = 0,
> > + sa_rootdomain,
> > + sa_tmpmask,
> > + sa_send_covered,
> > + sa_this_core_map,
> > + sa_this_sibling_map,
> > + sa_nodemask,
> > + sa_sched_group_nodes,
> > +#ifdef CONFIG_NUMA
> > + sa_notcovered,
> > + sa_covered,
> > + sa_domainspan,
> > +#endif
> > + sa_none,
> > +};
> > +
> > /*
> > * SMT sched-domains:
> > */
> > @@ -8500,6 +8517,77 @@ static void set_domain_attribute(struct sched_domain *sd,
> > }
> > }
> >
> > +static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
> > + const struct cpumask *cpu_map)
> > +{
> > + switch (what) {
> > + case sa_sched_groups:
> > + free_sched_groups(cpu_map, d->tmpmask); /* fall through */
> > + d->sched_group_nodes = NULL;
> > + case sa_rootdomain:
> > + free_rootdomain(d->rd); /* fall through */
> > + case sa_tmpmask:
> > + free_cpumask_var(d->tmpmask); /* fall through */
> > + case sa_send_covered:
> > + free_cpumask_var(d->send_covered); /* fall through */
> > + case sa_this_core_map:
> > + free_cpumask_var(d->this_core_map); /* fall through */
> > + case sa_this_sibling_map:
> > + free_cpumask_var(d->this_sibling_map); /* fall through */
> > + case sa_nodemask:
> > + free_cpumask_var(d->nodemask); /* fall through */
> > + case sa_sched_group_nodes:
> > +#ifdef CONFIG_NUMA
> > + kfree(d->sched_group_nodes); /* fall through */
> > + case sa_notcovered:
> > + free_cpumask_var(d->notcovered); /* fall through */
> > + case sa_covered:
> > + free_cpumask_var(d->covered); /* fall through */
> > + case sa_domainspan:
> > + free_cpumask_var(d->domainspan); /* fall through */
> > +#endif
> > + case sa_none:
> > + break;
> > + }
> > +}
> > +
> > +static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
> > + const struct cpumask *cpu_map)
> > +{
> > +#ifdef CONFIG_NUMA
> > + if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
> > + return sa_none;
> > + if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
> > + return sa_domainspan;
> > + if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
> > + return sa_covered;
> > + /* Allocate the per-node list of sched groups */
> > + d->sched_group_nodes = kcalloc(nr_node_ids,
> > + sizeof(struct sched_group *), GFP_KERNEL);
> > + if (!d->sched_group_nodes) {
> > + printk(KERN_WARNING "Can not alloc sched group node list\n");
> > + return sa_notcovered;
> > + }
> > + sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
> > +#endif
> > + if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
> > + return sa_sched_group_nodes;
> > + if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
> > + return sa_nodemask;
> > + if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
> > + return sa_this_sibling_map;
> > + if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
> > + return sa_this_core_map;
> > + if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
> > + return sa_send_covered;
> > + d->rd = alloc_rootdomain();
> > + if (!d->rd) {
> > + printk(KERN_WARNING "Cannot alloc root domain\n");
> > + return sa_tmpmask;
> > + }
> > + return sa_rootdomain;
> > +}
>
> Code like this makes me wonder if the decomposition you chose is the
> right one.
It was the fastest decomposition to get much stuff out of the way when
working on this huge function -- plus without introducing (too many)
regressions.
> I'd much rather see something that keeps the various domain levels fully
> isolated. That is, the numa code should not need to know anything about
> the multi-core code etc.
The question is how feasible this is.
There are various dependencies in the current code, e.g. the
degeneration step is done at very last.
Not sure at the moment whether all steps can be intermediately
performed. (i.e. initial creation, building groups, set power)
But probably this could work.
> The above we still have everything mixed in one.
Yep.
> Maybe something along the lines of (skipping lots of fun detail):
>
> struct domain_constructor {
> struct sched_domain *func(const struct cpumask *cpu_map,
> struct sched_domain_attr *attr);
> };
>
> struct domain_constructor domain_constructors[] = {
> { &construct_numa_domain },
> { &construct_mc_domain },
> { &construct_cpu_domain },
> { &construct_smt_domain },
> };
>
> static int construct_sched_domains(const struct cpumask *cpu_map,
> struct sched_domain_attr *attr)
> {
> int i;
> struct sched_domain *top = NULL, *parent = NULL, *sd;
>
> for (i = 0; i < ARRAY_SIZE(domain_constructors); i++) {
> sd = domain_constructors[i].func(cpu_map, attr);
> if (!sd)
> continue;
> if (IS_ERR(sd)) {
> ret = PTR_ERR(sd);
> goto fail;
> }
> if (!top)
> top = sd;
>
> if (degenerate_domain(parent, sd)) {
> fold_domain(parent, sd);
> sd->destroy();
> continue;
> }
>
> sd->parent = parent;
> parent = sd;
> }
>
> ret = attach_domain(sd);
> if (ret)
> goto fail;
>
> out:
> return ret;
>
> fail:
> for (sd = parent; sd; sd = parent) {
> parent = sd->parent;
> sd->destroy();
> }
>
> goto out;
> }
Yes, it would be interesting to see this implemented ;-)
At least there's room for improvement in the domain creation code.
Regards,
Andreas
--
Operating | Advanced Micro Devices GmbH
System | Karl-Hammerschmidt-Str. 34, 85609 Dornach b. München, Germany
Research | Geschäftsführer: Thomas M. McCoy, Giuliano Meroni
Center | Sitz: Dornach, Gemeinde Aschheim, Landkreis München
(OSRC) | Registergericht München, HRB Nr. 43632
^ permalink raw reply [flat|nested] 30+ messages in thread
* [tip:sched/domains] sched: Separate out allocation/free/goto-hell from __build_sched_domains
2009-08-18 10:53 ` [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains Andreas Herrmann
2009-08-18 12:57 ` Peter Zijlstra
@ 2009-08-18 16:52 ` tip-bot for Andreas Herrmann
1 sibling, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:52 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 2109b99ee192764b407dc7f52babb74740eea6f9
Gitweb: http://git.kernel.org/tip/2109b99ee192764b407dc7f52babb74740eea6f9
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:53:00 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:39 +0200
sched: Separate out allocation/free/goto-hell from __build_sched_domains
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105300.GC29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 171 ++++++++++++++++++++++++++++++++-----------------------
1 files changed, 99 insertions(+), 72 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 565ff77..c5d1fee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8107,6 +8107,23 @@ struct s_data {
struct root_domain *rd;
};
+enum s_alloc {
+ sa_sched_groups = 0,
+ sa_rootdomain,
+ sa_tmpmask,
+ sa_send_covered,
+ sa_this_core_map,
+ sa_this_sibling_map,
+ sa_nodemask,
+ sa_sched_group_nodes,
+#ifdef CONFIG_NUMA
+ sa_notcovered,
+ sa_covered,
+ sa_domainspan,
+#endif
+ sa_none,
+};
+
/*
* SMT sched-domains:
*/
@@ -8394,6 +8411,77 @@ static void set_domain_attribute(struct sched_domain *sd,
}
}
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+ const struct cpumask *cpu_map)
+{
+ switch (what) {
+ case sa_sched_groups:
+ free_sched_groups(cpu_map, d->tmpmask); /* fall through */
+ d->sched_group_nodes = NULL;
+ case sa_rootdomain:
+ free_rootdomain(d->rd); /* fall through */
+ case sa_tmpmask:
+ free_cpumask_var(d->tmpmask); /* fall through */
+ case sa_send_covered:
+ free_cpumask_var(d->send_covered); /* fall through */
+ case sa_this_core_map:
+ free_cpumask_var(d->this_core_map); /* fall through */
+ case sa_this_sibling_map:
+ free_cpumask_var(d->this_sibling_map); /* fall through */
+ case sa_nodemask:
+ free_cpumask_var(d->nodemask); /* fall through */
+ case sa_sched_group_nodes:
+#ifdef CONFIG_NUMA
+ kfree(d->sched_group_nodes); /* fall through */
+ case sa_notcovered:
+ free_cpumask_var(d->notcovered); /* fall through */
+ case sa_covered:
+ free_cpumask_var(d->covered); /* fall through */
+ case sa_domainspan:
+ free_cpumask_var(d->domainspan); /* fall through */
+#endif
+ case sa_none:
+ break;
+ }
+}
+
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+ const struct cpumask *cpu_map)
+{
+#ifdef CONFIG_NUMA
+ if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
+ return sa_none;
+ if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
+ return sa_domainspan;
+ if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
+ return sa_covered;
+ /* Allocate the per-node list of sched groups */
+ d->sched_group_nodes = kcalloc(nr_node_ids,
+ sizeof(struct sched_group *), GFP_KERNEL);
+ if (!d->sched_group_nodes) {
+ printk(KERN_WARNING "Can not alloc sched group node list\n");
+ return sa_notcovered;
+ }
+ sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
+#endif
+ if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
+ return sa_sched_group_nodes;
+ if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
+ return sa_nodemask;
+ if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
+ return sa_this_sibling_map;
+ if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+ return sa_this_core_map;
+ if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
+ return sa_send_covered;
+ d->rd = alloc_rootdomain();
+ if (!d->rd) {
+ printk(KERN_WARNING "Cannot alloc root domain\n");
+ return sa_tmpmask;
+ }
+ return sa_rootdomain;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8401,50 +8489,17 @@ static void set_domain_attribute(struct sched_domain *sd,
static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
+ enum s_alloc alloc_state = sa_none;
struct s_data d;
- int i, err = -ENOMEM;
+ int i;
#ifdef CONFIG_NUMA
d.sd_allnodes = 0;
- if (!alloc_cpumask_var(&d.domainspan, GFP_KERNEL))
- goto out;
- if (!alloc_cpumask_var(&d.covered, GFP_KERNEL))
- goto free_domainspan;
- if (!alloc_cpumask_var(&d.notcovered, GFP_KERNEL))
- goto free_covered;
-#endif
-
- if (!alloc_cpumask_var(&d.nodemask, GFP_KERNEL))
- goto free_notcovered;
- if (!alloc_cpumask_var(&d.this_sibling_map, GFP_KERNEL))
- goto free_nodemask;
- if (!alloc_cpumask_var(&d.this_core_map, GFP_KERNEL))
- goto free_this_sibling_map;
- if (!alloc_cpumask_var(&d.send_covered, GFP_KERNEL))
- goto free_this_core_map;
- if (!alloc_cpumask_var(&d.tmpmask, GFP_KERNEL))
- goto free_send_covered;
-
-#ifdef CONFIG_NUMA
- /*
- * Allocate the per-node list of sched groups
- */
- d.sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
- GFP_KERNEL);
- if (!d.sched_group_nodes) {
- printk(KERN_WARNING "Can not alloc sched group node list\n");
- goto free_tmpmask;
- }
#endif
- d.rd = alloc_rootdomain();
- if (!d.rd) {
- printk(KERN_WARNING "Cannot alloc root domain\n");
- goto free_sched_groups;
- }
-
-#ifdef CONFIG_NUMA
- sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d.sched_group_nodes;
-#endif
+ alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+ if (alloc_state != sa_rootdomain)
+ goto error;
+ alloc_state = sa_sched_groups;
/*
* Set up domains for cpus specified by the cpu_map.
@@ -8675,41 +8730,13 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpu_attach_domain(sd, d.rd, i);
}
- err = 0;
-
-free_tmpmask:
- free_cpumask_var(d.tmpmask);
-free_send_covered:
- free_cpumask_var(d.send_covered);
-free_this_core_map:
- free_cpumask_var(d.this_core_map);
-free_this_sibling_map:
- free_cpumask_var(d.this_sibling_map);
-free_nodemask:
- free_cpumask_var(d.nodemask);
-free_notcovered:
-#ifdef CONFIG_NUMA
- free_cpumask_var(d.notcovered);
-free_covered:
- free_cpumask_var(d.covered);
-free_domainspan:
- free_cpumask_var(d.domainspan);
-out:
-#endif
- return err;
-
-free_sched_groups:
-#ifdef CONFIG_NUMA
- kfree(d.sched_group_nodes);
-#endif
- goto free_tmpmask;
+ d.sched_group_nodes = NULL; /* don't free this we still need it */
+ __free_domain_allocs(&d, sa_tmpmask, cpu_map);
+ return 0;
-#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map, d.tmpmask);
- free_rootdomain(d.rd);
- goto free_tmpmask;
-#endif
+ __free_domain_allocs(&d, alloc_state, cpu_map);
+ return -ENOMEM;
}
static int build_sched_domains(const struct cpumask *cpu_map)
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 03/12] sched: Seperate out build of NUMA sched domain from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
2009-08-18 10:51 ` [PATCH 01/12] sched: Use structure to store local data in __build_sched_domains Andreas Herrmann
2009-08-18 10:53 ` [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains Andreas Herrmann
@ 2009-08-18 10:54 ` Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:54 ` [PATCH 04/12] sched: Seperate out build of CPU " Andreas Herrmann
` (9 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:54 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 57 +++++++++++++++++++++++++++++++------------------------
1 files changed, 32 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5aa7dad..e696cd9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8588,6 +8588,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
return sa_rootdomain;
}
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+ struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+ struct sched_domain *parent;
+
+ d->sd_allnodes = 0;
+ if (cpumask_weight(cpu_map) >
+ SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+ sd = &per_cpu(allnodes_domains, i).sd;
+ SD_INIT(sd, ALLNODES);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), cpu_map);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+ d->sd_allnodes = 1;
+ }
+ parent = sd;
+
+ sd = &per_cpu(node_domains, i).sd;
+ SD_INIT(sd, NODE);
+ set_domain_attribute(sd, attr);
+ sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8616,31 +8647,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
-#ifdef CONFIG_NUMA
- if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
- sd = &per_cpu(allnodes_domains, i).sd;
- SD_INIT(sd, ALLNODES);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), cpu_map);
- cpu_to_allnodes_group(i, cpu_map, &sd->groups,
- d.tmpmask);
- p = sd;
- d.sd_allnodes = 1;
- } else
- p = NULL;
-
- sd = &per_cpu(node_domains, i).sd;
- SD_INIT(sd, NODE);
- set_domain_attribute(sd, attr);
- sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
- sd->parent = p;
- if (p)
- p->child = sd;
- cpumask_and(sched_domain_span(sd),
- sched_domain_span(sd), cpu_map);
-#endif
-
+ sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
p = sd;
sd = &per_cpu(phys_domains, i).sd;
SD_INIT(sd, CPU);
--
1.6.4
^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [tip:sched/domains] sched: Separate out build of NUMA sched domain from __build_sched_domains
2009-08-18 10:54 ` [PATCH 03/12] sched: Seperate out build of NUMA sched domain " Andreas Herrmann
@ 2009-08-18 16:52 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:52 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 7f4588f3aa395632fec9ba2e15a1920f0682fda0
Gitweb: http://git.kernel.org/tip/7f4588f3aa395632fec9ba2e15a1920f0682fda0
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:54:06 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:40 +0200
sched: Separate out build of NUMA sched domain from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105406.GD29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 57 +++++++++++++++++++++++++++++++------------------------
1 files changed, 32 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c5d1fee..dd95a47 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8482,6 +8482,37 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
return sa_rootdomain;
}
+static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+{
+ struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+ struct sched_domain *parent;
+
+ d->sd_allnodes = 0;
+ if (cpumask_weight(cpu_map) >
+ SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
+ sd = &per_cpu(allnodes_domains, i).sd;
+ SD_INIT(sd, ALLNODES);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), cpu_map);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
+ d->sd_allnodes = 1;
+ }
+ parent = sd;
+
+ sd = &per_cpu(node_domains, i).sd;
+ SD_INIT(sd, NODE);
+ set_domain_attribute(sd, attr);
+ sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8510,31 +8541,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
-#ifdef CONFIG_NUMA
- if (cpumask_weight(cpu_map) >
- SD_NODES_PER_DOMAIN*cpumask_weight(d.nodemask)) {
- sd = &per_cpu(allnodes_domains, i).sd;
- SD_INIT(sd, ALLNODES);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), cpu_map);
- cpu_to_allnodes_group(i, cpu_map, &sd->groups,
- d.tmpmask);
- p = sd;
- d.sd_allnodes = 1;
- } else
- p = NULL;
-
- sd = &per_cpu(node_domains, i).sd;
- SD_INIT(sd, NODE);
- set_domain_attribute(sd, attr);
- sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
- sd->parent = p;
- if (p)
- p->child = sd;
- cpumask_and(sched_domain_span(sd),
- sched_domain_span(sd), cpu_map);
-#endif
-
+ sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
p = sd;
sd = &per_cpu(phys_domains, i).sd;
SD_INIT(sd, CPU);
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 04/12] sched: Seperate out build of CPU sched domain from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (2 preceding siblings ...)
2009-08-18 10:54 ` [PATCH 03/12] sched: Seperate out build of NUMA sched domain " Andreas Herrmann
@ 2009-08-18 10:54 ` Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:56 ` [PATCH 05/12] sched: Seperate out build of MC " Andreas Herrmann
` (8 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:54 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 26 +++++++++++++++++---------
1 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e696cd9..a2e09f4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8619,6 +8619,22 @@ static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
return sd;
}
+static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd;
+ sd = &per_cpu(phys_domains, i).sd;
+ SD_INIT(sd, CPU);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), d->nodemask);
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8648,15 +8664,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpu_map);
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
- p = sd;
- sd = &per_cpu(phys_domains, i).sd;
- SD_INIT(sd, CPU);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), d.nodemask);
- sd->parent = p;
- if (p)
- p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups, d.tmpmask);
+ sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
#ifdef CONFIG_SCHED_MC
p = sd;
--
1.6.4
^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [tip:sched/domains] sched: Separate out build of CPU sched domain from __build_sched_domains
2009-08-18 10:54 ` [PATCH 04/12] sched: Seperate out build of CPU " Andreas Herrmann
@ 2009-08-18 16:52 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:52 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 87cce6622c2ab2f0e96ecc2a37133378a7db3177
Gitweb: http://git.kernel.org/tip/87cce6622c2ab2f0e96ecc2a37133378a7db3177
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:54:55 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:40 +0200
sched: Separate out build of CPU sched domain from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105455.GE29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 26 +++++++++++++++++---------
1 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index dd95a47..3d0666c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8513,6 +8513,22 @@ static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
return sd;
}
+static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd;
+ sd = &per_cpu(phys_domains, i).sd;
+ SD_INIT(sd, CPU);
+ set_domain_attribute(sd, attr);
+ cpumask_copy(sched_domain_span(sd), d->nodemask);
+ sd->parent = parent;
+ if (parent)
+ parent->child = sd;
+ cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8542,15 +8558,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
cpu_map);
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
- p = sd;
- sd = &per_cpu(phys_domains, i).sd;
- SD_INIT(sd, CPU);
- set_domain_attribute(sd, attr);
- cpumask_copy(sched_domain_span(sd), d.nodemask);
- sd->parent = p;
- if (p)
- p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups, d.tmpmask);
+ sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
#ifdef CONFIG_SCHED_MC
p = sd;
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 05/12] sched: Seperate out build of MC sched domain from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (3 preceding siblings ...)
2009-08-18 10:54 ` [PATCH 04/12] sched: Seperate out build of CPU " Andreas Herrmann
@ 2009-08-18 10:56 ` Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:57 ` [PATCH 06/12] sched: Seperate out build of SMT " Andreas Herrmann
` (7 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:56 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 30 ++++++++++++++++++------------
1 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a2e09f4..e24cb49 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8635,6 +8635,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
return sd;
}
+static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_MC
+ sd = &per_cpu(core_domains, i).sd;
+ SD_INIT(sd, MC);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8665,18 +8682,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
-
-#ifdef CONFIG_SCHED_MC
- p = sd;
- sd = &per_cpu(core_domains, i).sd;
- SD_INIT(sd, MC);
- set_domain_attribute(sd, attr);
- cpumask_and(sched_domain_span(sd), cpu_map,
- cpu_coregroup_mask(i));
- sd->parent = p;
- p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups, d.tmpmask);
-#endif
+ sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
#ifdef CONFIG_SCHED_SMT
p = sd;
--
1.6.4
^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [tip:sched/domains] sched: Separate out build of MC sched domain from __build_sched_domains
2009-08-18 10:56 ` [PATCH 05/12] sched: Seperate out build of MC " Andreas Herrmann
@ 2009-08-18 16:52 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:52 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 410c408108bb85f32fe132aaf448388af0b6da64
Gitweb: http://git.kernel.org/tip/410c408108bb85f32fe132aaf448388af0b6da64
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:56:14 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:41 +0200
sched: Separate out build of MC sched domain from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105614.GF29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 30 ++++++++++++++++++------------
1 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3d0666c..5c829d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8529,6 +8529,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
return sd;
}
+static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_MC
+ sd = &per_cpu(core_domains, i).sd;
+ SD_INIT(sd, MC);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8559,18 +8576,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
-
-#ifdef CONFIG_SCHED_MC
- p = sd;
- sd = &per_cpu(core_domains, i).sd;
- SD_INIT(sd, MC);
- set_domain_attribute(sd, attr);
- cpumask_and(sched_domain_span(sd), cpu_map,
- cpu_coregroup_mask(i));
- sd->parent = p;
- p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups, d.tmpmask);
-#endif
+ sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
#ifdef CONFIG_SCHED_SMT
p = sd;
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 06/12] sched: Seperate out build of SMT sched domain from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (4 preceding siblings ...)
2009-08-18 10:56 ` [PATCH 05/12] sched: Seperate out build of MC " Andreas Herrmann
@ 2009-08-18 10:57 ` Andreas Herrmann
2009-08-18 16:52 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:57 ` [PATCH 07/12] sched: Seperate out build of SMT sched groups " Andreas Herrmann
` (6 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:57 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 32 +++++++++++++++++++-------------
1 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e24cb49..a253ca6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8652,6 +8652,23 @@ static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
return sd;
}
+static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_SMT
+ sd = &per_cpu(cpu_domains, i).sd;
+ SD_INIT(sd, SIBLING);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8675,7 +8692,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = NULL, *p;
+ struct sched_domain *sd;
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
@@ -8683,18 +8700,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
-
-#ifdef CONFIG_SCHED_SMT
- p = sd;
- sd = &per_cpu(cpu_domains, i).sd;
- SD_INIT(sd, SIBLING);
- set_domain_attribute(sd, attr);
- cpumask_and(sched_domain_span(sd),
- topology_thread_cpumask(i), cpu_map);
- sd->parent = p;
- p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups, d.tmpmask);
-#endif
+ sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
}
#ifdef CONFIG_SCHED_SMT
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Separate out build of SMT sched domain from __build_sched_domains
2009-08-18 10:57 ` [PATCH 06/12] sched: Seperate out build of SMT " Andreas Herrmann
@ 2009-08-18 16:52 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:52 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: d81735355533cd4b2bce9508d86fcad24a38cf47
Gitweb: http://git.kernel.org/tip/d81735355533cd4b2bce9508d86fcad24a38cf47
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:57:03 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:42 +0200
sched: Separate out build of SMT sched domain from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105703.GG29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 32 +++++++++++++++++++-------------
1 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5c829d4..2ecec06 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8546,6 +8546,23 @@ static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
return sd;
}
+static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *parent, int i)
+{
+ struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_SMT
+ sd = &per_cpu(cpu_domains, i).sd;
+ SD_INIT(sd, SIBLING);
+ set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
+ sd->parent = parent;
+ parent->child = sd;
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+ return sd;
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8569,7 +8586,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = NULL, *p;
+ struct sched_domain *sd;
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
@@ -8577,18 +8594,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
-
-#ifdef CONFIG_SCHED_SMT
- p = sd;
- sd = &per_cpu(cpu_domains, i).sd;
- SD_INIT(sd, SIBLING);
- set_domain_attribute(sd, attr);
- cpumask_and(sched_domain_span(sd),
- topology_thread_cpumask(i), cpu_map);
- sd->parent = p;
- p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups, d.tmpmask);
-#endif
+ sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
}
#ifdef CONFIG_SCHED_SMT
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 07/12] sched: Separate out build of SMT sched groups from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (5 preceding siblings ...)
2009-08-18 10:57 ` [PATCH 06/12] sched: Seperate out build of SMT " Andreas Herrmann
@ 2009-08-18 10:57 ` Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:58 ` [PATCH 08/12] sched: Seperate out build of MC " Andreas Herrmann
` (5 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:57 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 31 ++++++++++++++++++++-----------
1 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a253ca6..cd00d9e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8669,6 +8669,25 @@ static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
return sd;
}
+static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+ const struct cpumask *cpu_map, int cpu)
+{
+ switch (l) {
+#ifdef CONFIG_SCHED_SMT
+ case SD_LV_SIBLING: /* set up CPU (sibling) groups */
+ cpumask_and(d->this_sibling_map, cpu_map,
+ topology_thread_cpumask(cpu));
+ if (cpu == cpumask_first(d->this_sibling_map))
+ init_sched_build_groups(d->this_sibling_map, cpu_map,
+ &cpu_to_cpu_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8703,19 +8722,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
}
-#ifdef CONFIG_SCHED_SMT
- /* Set up CPU (sibling) groups */
for_each_cpu(i, cpu_map) {
- cpumask_and(d.this_sibling_map,
- topology_thread_cpumask(i), cpu_map);
- if (i != cpumask_first(d.this_sibling_map))
- continue;
-
- init_sched_build_groups(d.this_sibling_map, cpu_map,
- &cpu_to_cpu_group,
- d.send_covered, d.tmpmask);
+ build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
}
-#endif
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Separate out build of SMT sched groups from __build_sched_domains
2009-08-18 10:57 ` [PATCH 07/12] sched: Seperate out build of SMT sched groups " Andreas Herrmann
@ 2009-08-18 16:53 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:53 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 0e8e85c941d8f1b43bcc2e3b8b7026cdae476c53
Gitweb: http://git.kernel.org/tip/0e8e85c941d8f1b43bcc2e3b8b7026cdae476c53
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:57:51 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:42 +0200
sched: Separate out build of SMT sched groups from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105751.GH29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 31 ++++++++++++++++++++-----------
1 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 2ecec06..43cfc6e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8563,6 +8563,25 @@ static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
return sd;
}
+static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
+ const struct cpumask *cpu_map, int cpu)
+{
+ switch (l) {
+#ifdef CONFIG_SCHED_SMT
+ case SD_LV_SIBLING: /* set up CPU (sibling) groups */
+ cpumask_and(d->this_sibling_map, cpu_map,
+ topology_thread_cpumask(cpu));
+ if (cpu == cpumask_first(d->this_sibling_map))
+ init_sched_build_groups(d->this_sibling_map, cpu_map,
+ &cpu_to_cpu_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
+ default:
+ break;
+ }
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
@@ -8597,19 +8616,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
}
-#ifdef CONFIG_SCHED_SMT
- /* Set up CPU (sibling) groups */
for_each_cpu(i, cpu_map) {
- cpumask_and(d.this_sibling_map,
- topology_thread_cpumask(i), cpu_map);
- if (i != cpumask_first(d.this_sibling_map))
- continue;
-
- init_sched_build_groups(d.this_sibling_map, cpu_map,
- &cpu_to_cpu_group,
- d.send_covered, d.tmpmask);
+ build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
}
-#endif
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 08/12] sched: Separate out build of MC sched groups from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (6 preceding siblings ...)
2009-08-18 10:57 ` [PATCH 07/12] sched: Seperate out build of SMT sched groups " Andreas Herrmann
@ 2009-08-18 10:58 ` Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:59 ` [PATCH 09/12] sched: Seperate out build of CPU " Andreas Herrmann
` (4 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:58 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 23 ++++++++++-------------
1 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index cd00d9e..d045e5c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8683,6 +8683,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
d->send_covered, d->tmpmask);
break;
#endif
+#ifdef CONFIG_SCHED_MC
+ case SD_LV_MC: /* set up multi-core groups */
+ cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+ if (cpu == cpumask_first(d->this_core_map))
+ init_sched_build_groups(d->this_core_map, cpu_map,
+ &cpu_to_core_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
default:
break;
}
@@ -8724,21 +8733,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+ build_sched_groups(&d, SD_LV_MC, cpu_map, i);
}
-#ifdef CONFIG_SCHED_MC
- /* Set up multi-core groups */
- for_each_cpu(i, cpu_map) {
- cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
- if (i != cpumask_first(d.this_core_map))
- continue;
-
- init_sched_build_groups(d.this_core_map, cpu_map,
- &cpu_to_core_group,
- d.send_covered, d.tmpmask);
- }
-#endif
-
/* Set up physical groups */
for (i = 0; i < nr_node_ids; i++) {
cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Separate out build of MC sched groups from __build_sched_domains
2009-08-18 10:58 ` [PATCH 08/12] sched: Seperate out build of MC " Andreas Herrmann
@ 2009-08-18 16:53 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:53 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: a2af04cdbb748158043e31799b28c48272081600
Gitweb: http://git.kernel.org/tip/a2af04cdbb748158043e31799b28c48272081600
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:58:38 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:43 +0200
sched: Separate out build of MC sched groups from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105838.GI29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 23 ++++++++++-------------
1 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 43cfc6e..f2c202f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8577,6 +8577,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
d->send_covered, d->tmpmask);
break;
#endif
+#ifdef CONFIG_SCHED_MC
+ case SD_LV_MC: /* set up multi-core groups */
+ cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
+ if (cpu == cpumask_first(d->this_core_map))
+ init_sched_build_groups(d->this_core_map, cpu_map,
+ &cpu_to_core_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
default:
break;
}
@@ -8618,21 +8627,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+ build_sched_groups(&d, SD_LV_MC, cpu_map, i);
}
-#ifdef CONFIG_SCHED_MC
- /* Set up multi-core groups */
- for_each_cpu(i, cpu_map) {
- cpumask_and(d.this_core_map, cpu_coregroup_mask(i), cpu_map);
- if (i != cpumask_first(d.this_core_map))
- continue;
-
- init_sched_build_groups(d.this_core_map, cpu_map,
- &cpu_to_core_group,
- d.send_covered, d.tmpmask);
- }
-#endif
-
/* Set up physical groups */
for (i = 0; i < nr_node_ids; i++) {
cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 09/12] sched: Separate out build of CPU sched groups from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (7 preceding siblings ...)
2009-08-18 10:58 ` [PATCH 08/12] sched: Seperate out build of MC " Andreas Herrmann
@ 2009-08-18 10:59 ` Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:00 ` [PATCH 10/12] sched: Seperate out build of ALLNODES " Andreas Herrmann
` (3 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 10:59 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 18 +++++++++---------
1 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d045e5c..666017b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8692,6 +8692,13 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
d->send_covered, d->tmpmask);
break;
#endif
+ case SD_LV_CPU: /* set up physical groups */
+ cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
+ if (!cpumask_empty(d->nodemask))
+ init_sched_build_groups(d->nodemask, cpu_map,
+ &cpu_to_phys_group,
+ d->send_covered, d->tmpmask);
+ break;
default:
break;
}
@@ -8737,15 +8744,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
}
/* Set up physical groups */
- for (i = 0; i < nr_node_ids; i++) {
- cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(d.nodemask))
- continue;
-
- init_sched_build_groups(d.nodemask, cpu_map,
- &cpu_to_phys_group,
- d.send_covered, d.tmpmask);
- }
+ for (i = 0; i < nr_node_ids; i++)
+ build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
#ifdef CONFIG_NUMA
/* Set up node groups */
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Separate out build of CPU sched groups from __build_sched_domains
2009-08-18 10:59 ` [PATCH 09/12] sched: Seperate out build of CPU " Andreas Herrmann
@ 2009-08-18 16:53 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:53 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 86548096f252bfe2065f1ea2d301e7319a16375d
Gitweb: http://git.kernel.org/tip/86548096f252bfe2065f1ea2d301e7319a16375d
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 12:59:28 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:43 +0200
sched: Separate out build of CPU sched groups from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818105928.GJ29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 18 +++++++++---------
1 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f2c202f..b09a41c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8586,6 +8586,13 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
d->send_covered, d->tmpmask);
break;
#endif
+ case SD_LV_CPU: /* set up physical groups */
+ cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
+ if (!cpumask_empty(d->nodemask))
+ init_sched_build_groups(d->nodemask, cpu_map,
+ &cpu_to_phys_group,
+ d->send_covered, d->tmpmask);
+ break;
default:
break;
}
@@ -8631,15 +8638,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
}
/* Set up physical groups */
- for (i = 0; i < nr_node_ids; i++) {
- cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(d.nodemask))
- continue;
-
- init_sched_build_groups(d.nodemask, cpu_map,
- &cpu_to_phys_group,
- d.send_covered, d.tmpmask);
- }
+ for (i = 0; i < nr_node_ids; i++)
+ build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
#ifdef CONFIG_NUMA
/* Set up node groups */
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 10/12] sched: Separate out build of ALLNODES sched groups from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (8 preceding siblings ...)
2009-08-18 10:59 ` [PATCH 09/12] sched: Seperate out build of CPU " Andreas Herrmann
@ 2009-08-18 11:00 ` Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:01 ` [PATCH 11/12] sched: Seperate out build of NUMA " Andreas Herrmann
` (2 subsequent siblings)
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 11:00 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
For the sake of completeness.
Now all calls to init_sched_build_groups() are contained in
build_sched_groups().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 13 ++++++++-----
1 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 666017b..8f53701 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8699,6 +8699,12 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
&cpu_to_phys_group,
d->send_covered, d->tmpmask);
break;
+#ifdef CONFIG_NUMA
+ case SD_LV_ALLNODES:
+ init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
default:
break;
}
@@ -8749,11 +8755,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA
/* Set up node groups */
- if (d.sd_allnodes) {
- init_sched_build_groups(cpu_map, cpu_map,
- &cpu_to_allnodes_group,
- d.send_covered, d.tmpmask);
- }
+ if (d.sd_allnodes)
+ build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
for (i = 0; i < nr_node_ids; i++) {
/* Set up node groups */
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Separate out build of ALLNODES sched groups from __build_sched_domains
2009-08-18 11:00 ` [PATCH 10/12] sched: Seperate out build of ALLNODES " Andreas Herrmann
@ 2009-08-18 16:53 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:53 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: de616e36c700dc312d9021dd75f769c463f85122
Gitweb: http://git.kernel.org/tip/de616e36c700dc312d9021dd75f769c463f85122
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 13:00:13 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:44 +0200
sched: Separate out build of ALLNODES sched groups from __build_sched_domains
For the sake of completeness.
Now all calls to init_sched_build_groups() are contained in
build_sched_groups().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110013.GK29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 13 ++++++++-----
1 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b09a41c..52c1953 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8593,6 +8593,12 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
&cpu_to_phys_group,
d->send_covered, d->tmpmask);
break;
+#ifdef CONFIG_NUMA
+ case SD_LV_ALLNODES:
+ init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
+ d->send_covered, d->tmpmask);
+ break;
+#endif
default:
break;
}
@@ -8643,11 +8649,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
#ifdef CONFIG_NUMA
/* Set up node groups */
- if (d.sd_allnodes) {
- init_sched_build_groups(cpu_map, cpu_map,
- &cpu_to_allnodes_group,
- d.send_covered, d.tmpmask);
- }
+ if (d.sd_allnodes)
+ build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
for (i = 0; i < nr_node_ids; i++) {
/* Set up node groups */
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 11/12] sched: Separate out build of NUMA sched groups from __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (9 preceding siblings ...)
2009-08-18 11:00 ` [PATCH 10/12] sched: Seperate out build of ALLNODES " Andreas Herrmann
@ 2009-08-18 11:01 ` Andreas Herrmann
2009-08-18 16:53 ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:02 ` [PATCH 12/12] sched: Consolidate definition of variable sd in __build_sched_domains Andreas Herrmann
2009-08-18 11:16 ` [PATCH 0/12] cleanup __build_sched_domains() Ingo Molnar
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 11:01 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 130 +++++++++++++++++++++++++++++---------------------------
1 files changed, 67 insertions(+), 63 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8f53701..1933641 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8352,6 +8352,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
sg = sg->next;
} while (sg != group_head);
}
+
+static int build_numa_sched_groups(struct s_data *d,
+ const struct cpumask *cpu_map, int num)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg, *prev;
+ int n, j;
+
+ cpumask_clear(d->covered);
+ cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+ if (cpumask_empty(d->nodemask)) {
+ d->sched_group_nodes[num] = NULL;
+ goto out;
+ }
+
+ sched_domain_node_span(num, d->domainspan);
+ cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+ num);
+ return -ENOMEM;
+ }
+ d->sched_group_nodes[num] = sg;
+
+ for_each_cpu(j, d->nodemask) {
+ sd = &per_cpu(node_domains, j).sd;
+ sd->groups = sg;
+ }
+
+ sg->__cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->nodemask);
+ sg->next = sg;
+ cpumask_or(d->covered, d->covered, d->nodemask);
+
+ prev = sg;
+ for (j = 0; j < nr_node_ids; j++) {
+ n = (num + j) % nr_node_ids;
+ cpumask_complement(d->notcovered, d->covered);
+ cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+ cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+ if (cpumask_empty(d->tmpmask))
+ break;
+ cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+ if (cpumask_empty(d->tmpmask))
+ continue;
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING
+ "Can not alloc domain group for node %d\n", j);
+ return -ENOMEM;
+ }
+ sg->__cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+ sg->next = prev->next;
+ cpumask_or(d->covered, d->covered, d->tmpmask);
+ prev->next = sg;
+ prev = sg;
+ }
+out:
+ return 0;
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_NUMA
@@ -8758,70 +8823,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
if (d.sd_allnodes)
build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
- for (i = 0; i < nr_node_ids; i++) {
- /* Set up node groups */
- struct sched_group *sg, *prev;
- int j;
-
- cpumask_clear(d.covered);
- cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(d.nodemask)) {
- d.sched_group_nodes[i] = NULL;
- continue;
- }
-
- sched_domain_node_span(i, d.domainspan);
- cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
- sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING "Can not alloc domain group for "
- "node %d\n", i);
+ for (i = 0; i < nr_node_ids; i++)
+ if (build_numa_sched_groups(&d, cpu_map, i))
goto error;
- }
- d.sched_group_nodes[i] = sg;
- for_each_cpu(j, d.nodemask) {
- struct sched_domain *sd;
-
- sd = &per_cpu(node_domains, j).sd;
- sd->groups = sg;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), d.nodemask);
- sg->next = sg;
- cpumask_or(d.covered, d.covered, d.nodemask);
- prev = sg;
-
- for (j = 0; j < nr_node_ids; j++) {
- int n = (i + j) % nr_node_ids;
-
- cpumask_complement(d.notcovered, d.covered);
- cpumask_and(d.tmpmask, d.notcovered, cpu_map);
- cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
- if (cpumask_empty(d.tmpmask))
- break;
-
- cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
- if (cpumask_empty(d.tmpmask))
- continue;
-
- sg = kmalloc_node(sizeof(struct sched_group) +
- cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", j);
- goto error;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), d.tmpmask);
- sg->next = prev->next;
- cpumask_or(d.covered, d.covered, d.tmpmask);
- prev->next = sg;
- prev = sg;
- }
- }
#endif
/* Calculate CPU power for physical packages and nodes */
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Separate out build of NUMA sched groups from __build_sched_domains
2009-08-18 11:01 ` [PATCH 11/12] sched: Seperate out build of NUMA " Andreas Herrmann
@ 2009-08-18 16:53 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:53 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 0601a88d8fa4508eaa49a6d96c6685e1dece38e3
Gitweb: http://git.kernel.org/tip/0601a88d8fa4508eaa49a6d96c6685e1dece38e3
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 13:01:11 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:44 +0200
sched: Separate out build of NUMA sched groups from __build_sched_domains
... to further strip down __build_sched_domains().
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110111.GL29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 130 +++++++++++++++++++++++++++++---------------------------
1 files changed, 67 insertions(+), 63 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 52c1953..c1ce884 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8246,6 +8246,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
sg = sg->next;
} while (sg != group_head);
}
+
+static int build_numa_sched_groups(struct s_data *d,
+ const struct cpumask *cpu_map, int num)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg, *prev;
+ int n, j;
+
+ cpumask_clear(d->covered);
+ cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+ if (cpumask_empty(d->nodemask)) {
+ d->sched_group_nodes[num] = NULL;
+ goto out;
+ }
+
+ sched_domain_node_span(num, d->domainspan);
+ cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+ num);
+ return -ENOMEM;
+ }
+ d->sched_group_nodes[num] = sg;
+
+ for_each_cpu(j, d->nodemask) {
+ sd = &per_cpu(node_domains, j).sd;
+ sd->groups = sg;
+ }
+
+ sg->__cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->nodemask);
+ sg->next = sg;
+ cpumask_or(d->covered, d->covered, d->nodemask);
+
+ prev = sg;
+ for (j = 0; j < nr_node_ids; j++) {
+ n = (num + j) % nr_node_ids;
+ cpumask_complement(d->notcovered, d->covered);
+ cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+ cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+ if (cpumask_empty(d->tmpmask))
+ break;
+ cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+ if (cpumask_empty(d->tmpmask))
+ continue;
+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, num);
+ if (!sg) {
+ printk(KERN_WARNING
+ "Can not alloc domain group for node %d\n", j);
+ return -ENOMEM;
+ }
+ sg->__cpu_power = 0;
+ cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+ sg->next = prev->next;
+ cpumask_or(d->covered, d->covered, d->tmpmask);
+ prev->next = sg;
+ prev = sg;
+ }
+out:
+ return 0;
+}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_NUMA
@@ -8652,70 +8717,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
if (d.sd_allnodes)
build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
- for (i = 0; i < nr_node_ids; i++) {
- /* Set up node groups */
- struct sched_group *sg, *prev;
- int j;
-
- cpumask_clear(d.covered);
- cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
- if (cpumask_empty(d.nodemask)) {
- d.sched_group_nodes[i] = NULL;
- continue;
- }
-
- sched_domain_node_span(i, d.domainspan);
- cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
- sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING "Can not alloc domain group for "
- "node %d\n", i);
+ for (i = 0; i < nr_node_ids; i++)
+ if (build_numa_sched_groups(&d, cpu_map, i))
goto error;
- }
- d.sched_group_nodes[i] = sg;
- for_each_cpu(j, d.nodemask) {
- struct sched_domain *sd;
-
- sd = &per_cpu(node_domains, j).sd;
- sd->groups = sg;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), d.nodemask);
- sg->next = sg;
- cpumask_or(d.covered, d.covered, d.nodemask);
- prev = sg;
-
- for (j = 0; j < nr_node_ids; j++) {
- int n = (i + j) % nr_node_ids;
-
- cpumask_complement(d.notcovered, d.covered);
- cpumask_and(d.tmpmask, d.notcovered, cpu_map);
- cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
- if (cpumask_empty(d.tmpmask))
- break;
-
- cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
- if (cpumask_empty(d.tmpmask))
- continue;
-
- sg = kmalloc_node(sizeof(struct sched_group) +
- cpumask_size(),
- GFP_KERNEL, i);
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", j);
- goto error;
- }
- sg->__cpu_power = 0;
- cpumask_copy(sched_group_cpus(sg), d.tmpmask);
- sg->next = prev->next;
- cpumask_or(d.covered, d.covered, d.tmpmask);
- prev->next = sg;
- prev = sg;
- }
- }
#endif
/* Calculate CPU power for physical packages and nodes */
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH 12/12] sched: Consolidate definition of variable sd in __build_sched_domains
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (10 preceding siblings ...)
2009-08-18 11:01 ` [PATCH 11/12] sched: Seperate out build of NUMA " Andreas Herrmann
@ 2009-08-18 11:02 ` Andreas Herrmann
2009-08-18 16:54 ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 11:16 ` [PATCH 0/12] cleanup __build_sched_domains() Ingo Molnar
12 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 11:02 UTC (permalink / raw)
To: Ingo Molnar, Peter Zijlstra; +Cc: linux-kernel
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 13 ++++---------
1 files changed, 4 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1933641..1c574ff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8784,6 +8784,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
{
enum s_alloc alloc_state = sa_none;
struct s_data d;
+ struct sched_domain *sd;
int i;
#ifdef CONFIG_NUMA
d.sd_allnodes = 0;
@@ -8798,8 +8799,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd;
-
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
@@ -8831,22 +8830,19 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
-
+ sd = &per_cpu(cpu_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
#ifdef CONFIG_SCHED_MC
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(core_domains, i).sd;
-
+ sd = &per_cpu(core_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
-
+ sd = &per_cpu(phys_domains, i).sd;
init_sched_groups_power(i, sd);
}
@@ -8865,7 +8861,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
/* Attach the domains */
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i).sd;
#elif defined(CONFIG_SCHED_MC)
--
1.6.4
^ permalink raw reply related [flat|nested] 30+ messages in thread* [tip:sched/domains] sched: Consolidate definition of variable sd in __build_sched_domains
2009-08-18 11:02 ` [PATCH 12/12] sched: Consolidate definition of variable sd in __build_sched_domains Andreas Herrmann
@ 2009-08-18 16:54 ` tip-bot for Andreas Herrmann
0 siblings, 0 replies; 30+ messages in thread
From: tip-bot for Andreas Herrmann @ 2009-08-18 16:54 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, andreas.herrmann3, peterz, tglx, mingo
Commit-ID: 294b0c9619a0469a3b385b6fc47e79f64222a692
Gitweb: http://git.kernel.org/tip/294b0c9619a0469a3b385b6fc47e79f64222a692
Author: Andreas Herrmann <andreas.herrmann3@amd.com>
AuthorDate: Tue, 18 Aug 2009 13:02:29 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 18 Aug 2009 18:35:45 +0200
sched: Consolidate definition of variable sd in __build_sched_domains
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110229.GM29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
kernel/sched.c | 13 ++++---------
1 files changed, 4 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c1ce884..cf4c953 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8678,6 +8678,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
{
enum s_alloc alloc_state = sa_none;
struct s_data d;
+ struct sched_domain *sd;
int i;
#ifdef CONFIG_NUMA
d.sd_allnodes = 0;
@@ -8692,8 +8693,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd;
-
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
@@ -8725,22 +8724,19 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
-
+ sd = &per_cpu(cpu_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
#ifdef CONFIG_SCHED_MC
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(core_domains, i).sd;
-
+ sd = &per_cpu(core_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
-
+ sd = &per_cpu(phys_domains, i).sd;
init_sched_groups_power(i, sd);
}
@@ -8759,7 +8755,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
/* Attach the domains */
for_each_cpu(i, cpu_map) {
- struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i).sd;
#elif defined(CONFIG_SCHED_MC)
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH 0/12] cleanup __build_sched_domains()
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
` (11 preceding siblings ...)
2009-08-18 11:02 ` [PATCH 12/12] sched: Consolidate definition of variable sd in __build_sched_domains Andreas Herrmann
@ 2009-08-18 11:16 ` Ingo Molnar
2009-08-18 13:15 ` Andreas Herrmann
12 siblings, 1 reply; 30+ messages in thread
From: Ingo Molnar @ 2009-08-18 11:16 UTC (permalink / raw)
To: Andreas Herrmann; +Cc: Peter Zijlstra, linux-kernel
* Andreas Herrmann <andreas.herrmann3@amd.com> wrote:
> Hi,
>
> Following patches try to make __build_sched_domains() less ugly
> and more readable. They shouldn't be harmful. Thus I think they
> can be applied for .32.
>
> Patches are against tip/master as of today.
>
> FYI, I need those patches as a base for introducing a new domain
> level for multi-node CPUs for which I intend to send patches as
> RFC asap.
Very nice cleanups!
Magny-Cours indeed will need one more sched-domains level,
something like:
[smt thread]
core
internal numa node
cpu socket
external numa node
... which is certainly interesting, especially since the hierarchy
possibly 'crosses', i.e. we might have the two internal numa nodes
share a L2 or L3 cache, right?
I'd also not be surprised if the load-balancer needed some care to
properly handle such a setup.
It's all welcome work in any case, and for .32.
Ingo
^ permalink raw reply [flat|nested] 30+ messages in thread* Re: [PATCH 0/12] cleanup __build_sched_domains()
2009-08-18 11:16 ` [PATCH 0/12] cleanup __build_sched_domains() Ingo Molnar
@ 2009-08-18 13:15 ` Andreas Herrmann
2009-08-18 13:25 ` Peter Zijlstra
0 siblings, 1 reply; 30+ messages in thread
From: Andreas Herrmann @ 2009-08-18 13:15 UTC (permalink / raw)
To: Ingo Molnar; +Cc: Peter Zijlstra, linux-kernel
On Tue, Aug 18, 2009 at 01:16:44PM +0200, Ingo Molnar wrote:
>
> * Andreas Herrmann <andreas.herrmann3@amd.com> wrote:
>
> > Hi,
> >
> > Following patches try to make __build_sched_domains() less ugly
> > and more readable. They shouldn't be harmful. Thus I think they
> > can be applied for .32.
> >
> > Patches are against tip/master as of today.
> >
> > FYI, I need those patches as a base for introducing a new domain
> > level for multi-node CPUs for which I intend to send patches as
> > RFC asap.
>
> Very nice cleanups!
>
> Magny-Cours indeed will need one more sched-domains level,
> something like:
>
> [smt thread]
> core
> internal numa node
> cpu socket
> external numa node
My current approach is to have the numa node domain either below CPU
(in case of multi-cpu node where SRAT describes each internal node as
a NUMA node) or as is, as the top-level domain (e.g. in case of node
interleaving or missing/broken ACPI SRAT detection).
Sched domain levels (note SMT==SIBLING, NODE==NUMA) are:
(1) groups in NUMA domain are subsets of groups in CPU domain
(2) groups in NUMA domain are supersets of groups in CPU domain
(1) | (2)
------------|-------------------
SMT | SMT
MC | MC
MN (new) | MN
NUMA | CPU
CPU | NUMA
I'll also introduce a new parameter sched_mn_power_savings which will
cause tasks to be scheduled on one socket until its capacity is
reached. If capacity is reached other sockets can also be occupied.
> ... which is certainly interesting, especially since the hierarchy
> possibly 'crosses', i.e. we might have the two internal numa nodes
> share a L2 or L3 cache, right?
> I'd also not be surprised if the load-balancer needed some care to
> properly handle such a setup.
It needs some care and gave me some headache to get it working in all
cases (i.e. NUMA, no-NUMA, NUMA-but-no-SRAT). My current code (that
still needs to be split in proper patches for submission) works fine
in all but one case. And I am still debugging it.
The case that is not working is a normal (non-multi-node) NUMA system
on which switching to power policy does not take effect for already
running tasks. Just the newly created ones are scheduled according to
the power policy.
> It's all welcome work in any case, and for .32.
Thanks,
Andreas
--
Operating | Advanced Micro Devices GmbH
System | Karl-Hammerschmidt-Str. 34, 85609 Dornach b. München, Germany
Research | Geschäftsführer: Thomas M. McCoy, Giuliano Meroni
Center | Sitz: Dornach, Gemeinde Aschheim, Landkreis München
(OSRC) | Registergericht München, HRB Nr. 43632
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH 0/12] cleanup __build_sched_domains()
2009-08-18 13:15 ` Andreas Herrmann
@ 2009-08-18 13:25 ` Peter Zijlstra
0 siblings, 0 replies; 30+ messages in thread
From: Peter Zijlstra @ 2009-08-18 13:25 UTC (permalink / raw)
To: Andreas Herrmann; +Cc: Ingo Molnar, linux-kernel, Gautham Shenoy, Vatsa
On Tue, 2009-08-18 at 15:15 +0200, Andreas Herrmann wrote:
>
> > I'd also not be surprised if the load-balancer needed some care to
> > properly handle such a setup.
>
> It needs some care and gave me some headache to get it working in all
> cases (i.e. NUMA, no-NUMA, NUMA-but-no-SRAT). My current code (that
> still needs to be split in proper patches for submission) works fine
> in all but one case. And I am still debugging it.
>
> The case that is not working is a normal (non-multi-node) NUMA system
> on which switching to power policy does not take effect for already
> running tasks. Just the new created ones are scheduled according to
> the power policy.
Note that current upstream is broken here in a way that isn't fixable in
a straight fwd way. Ego and vatsa were looking into that iirc.
^ permalink raw reply [flat|nested] 30+ messages in thread