public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Peter Zijlstra <peterz@infradead.org>
To: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Ingo Molnar <mingo@elte.hu>, linux-kernel@vger.kernel.org
Subject: Re: [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains
Date: Tue, 18 Aug 2009 14:57:10 +0200	[thread overview]
Message-ID: <1250600230.7583.278.camel@twins> (raw)
In-Reply-To: <20090818105300.GC29515@alberich.amd.com>

On Tue, 2009-08-18 at 12:53 +0200, Andreas Herrmann wrote:
> @@ -8213,6 +8213,23 @@ struct s_data {
>         struct root_domain      *rd;
>  };
>  
> +enum s_alloc {
> +       sa_sched_groups = 0,
> +       sa_rootdomain,
> +       sa_tmpmask,
> +       sa_send_covered,
> +       sa_this_core_map,
> +       sa_this_sibling_map,
> +       sa_nodemask,
> +       sa_sched_group_nodes,
> +#ifdef CONFIG_NUMA
> +       sa_notcovered,
> +       sa_covered,
> +       sa_domainspan,
> +#endif
> +       sa_none,
> +};
> +
>  /*
>   * SMT sched-domains:
>   */
> @@ -8500,6 +8517,77 @@ static void set_domain_attribute(struct sched_domain *sd,
>         }
>  }
>  
> +static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
> +                                const struct cpumask *cpu_map)
> +{
> +       switch (what) {
> +       case sa_sched_groups:
> +               free_sched_groups(cpu_map, d->tmpmask); /* fall through */
> +               d->sched_group_nodes = NULL;
> +       case sa_rootdomain:
> +               free_rootdomain(d->rd); /* fall through */
> +       case sa_tmpmask:
> +               free_cpumask_var(d->tmpmask); /* fall through */
> +       case sa_send_covered:
> +               free_cpumask_var(d->send_covered); /* fall through */
> +       case sa_this_core_map:
> +               free_cpumask_var(d->this_core_map); /* fall through */
> +       case sa_this_sibling_map:
> +               free_cpumask_var(d->this_sibling_map); /* fall through */
> +       case sa_nodemask:
> +               free_cpumask_var(d->nodemask); /* fall through */
> +       case sa_sched_group_nodes:
> +#ifdef CONFIG_NUMA
> +               kfree(d->sched_group_nodes); /* fall through */
> +       case sa_notcovered:
> +               free_cpumask_var(d->notcovered); /* fall through */
> +       case sa_covered:
> +               free_cpumask_var(d->covered); /* fall through */
> +       case sa_domainspan:
> +               free_cpumask_var(d->domainspan); /* fall through */
> +#endif
> +       case sa_none:
> +               break;
> +       }
> +}
> +
> +static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
> +                                                  const struct cpumask *cpu_map)
> +{
> +#ifdef CONFIG_NUMA
> +       if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
> +               return sa_none;
> +       if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
> +               return sa_domainspan;
> +       if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
> +               return sa_covered;
> +       /* Allocate the per-node list of sched groups */
> +       d->sched_group_nodes = kcalloc(nr_node_ids,
> +                                     sizeof(struct sched_group *), GFP_KERNEL);
> +       if (!d->sched_group_nodes) {
> +               printk(KERN_WARNING "Can not alloc sched group node list\n");
> +               return sa_notcovered;
> +       }
> +       sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
> +#endif
> +       if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
> +               return sa_sched_group_nodes;
> +       if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
> +               return sa_nodemask;
> +       if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
> +               return sa_this_sibling_map;
> +       if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
> +               return sa_this_core_map;
> +       if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
> +               return sa_send_covered;
> +       d->rd = alloc_rootdomain();
> +       if (!d->rd) {
> +               printk(KERN_WARNING "Cannot alloc root domain\n");
> +               return sa_tmpmask;
> +       }
> +       return sa_rootdomain;
> +}

Code like this makes me wonder if the decomposition you chose is the
right one.

I'd much rather see something that keeps the various domain levels fully
isolated. That is, the numa code should not need to know anything about
the multi-core code etc.

In the above we still have everything mixed together in one place.

Maybe something along the lines of (skipping lots of fun detail):

struct domain_constructor {
	struct sched_domain *(*func)(const struct cpumask *cpu_map,
				     struct sched_domain_attr *attr);
};

struct domain_constructor domain_constructors[] = {
	{ &construct_numa_domain },
	{ &construct_mc_domain },
	{ &construct_cpu_domain },
	{ &construct_smt_domain },
};

static int construct_sched_domains(const struct cpumask *cpu_map,
				   struct sched_domain_attr *attr)
{
	int i;
	struct sched_domain *top = NULL, *parent = NULL, *sd;

	for (i = 0; i < ARRAY_SIZE(domain_constructors); i++) {
		sd = domain_constructors[i].func(cpu_map, attr);
		if (!sd)
			continue;
		if (IS_ERR(sd)) {
			ret = PTR_ERR(sd);
			goto fail;
		}
		if (!top)
			top = sd;

		if (degenerate_domain(parent, sd)) {
			fold_domain(parent, sd);
			sd->destroy();
			continue;
		}

		sd->parent = parent;
		parent = sd;
	}

	ret = attach_domain(sd);
	if (ret)
		goto fail;

out:
	return ret;
	
fail:
	for (sd = parent; sd; sd = parent) {
		parent = sd->parent;
		sd->destroy();
	}

	goto out;
}



  reply	other threads:[~2009-08-18 12:57 UTC|newest]

Thread overview: 30+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2009-08-18 10:49 [PATCH 0/12] cleanup __build_sched_domains() Andreas Herrmann
2009-08-18 10:51 ` [PATCH 01/12] sched: Use structure to store local data in __build_sched_domains Andreas Herrmann
2009-08-18 16:51   ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 10:53 ` [PATCH 02/12] sched: Separate out allocation/free/goto-hell from __build_sched_domains Andreas Herrmann
2009-08-18 12:57   ` Peter Zijlstra [this message]
2009-08-18 13:35     ` Andreas Herrmann
2009-08-18 16:52   ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 10:54 ` [PATCH 03/12] sched: Seperate out build of NUMA sched domain " Andreas Herrmann
2009-08-18 16:52   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:54 ` [PATCH 04/12] sched: Seperate out build of CPU " Andreas Herrmann
2009-08-18 16:52   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:56 ` [PATCH 05/12] sched: Seperate out build of MC " Andreas Herrmann
2009-08-18 16:52   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:57 ` [PATCH 06/12] sched: Seperate out build of SMT " Andreas Herrmann
2009-08-18 16:52   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:57 ` [PATCH 07/12] sched: Seperate out build of SMT sched groups " Andreas Herrmann
2009-08-18 16:53   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:58 ` [PATCH 08/12] sched: Seperate out build of MC " Andreas Herrmann
2009-08-18 16:53   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 10:59 ` [PATCH 09/12] sched: Seperate out build of CPU " Andreas Herrmann
2009-08-18 16:53   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:00 ` [PATCH 10/12] sched: Seperate out build of ALLNODES " Andreas Herrmann
2009-08-18 16:53   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:01 ` [PATCH 11/12] sched: Seperate out build of NUMA " Andreas Herrmann
2009-08-18 16:53   ` [tip:sched/domains] sched: Separate " tip-bot for Andreas Herrmann
2009-08-18 11:02 ` [PATCH 12/12] sched: Consolidate definition of variable sd in __build_sched_domains Andreas Herrmann
2009-08-18 16:54   ` [tip:sched/domains] " tip-bot for Andreas Herrmann
2009-08-18 11:16 ` [PATCH 0/12] cleanup __build_sched_domains() Ingo Molnar
2009-08-18 13:15   ` Andreas Herrmann
2009-08-18 13:25     ` Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1250600230.7583.278.camel@twins \
    --to=peterz@infradead.org \
    --cc=andreas.herrmann3@amd.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@elte.hu \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox