netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Andrew Morton <akpm@linux-foundation.org>
To: Dan Williams <dan.j.williams@intel.com>
Cc: linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
	maciej.sosnowski@intel.com, hskinnemoen@atmel.com,
	g.liakhovetski@gmx.de, nicolas.ferre@atmel.com
Subject: Re: [PATCH 04/13] dmaengine: centralize channel allocation, introduce dma_find_channel
Date: Fri, 14 Nov 2008 22:14:42 -0800	[thread overview]
Message-ID: <20081114221442.d16cdaa1.akpm@linux-foundation.org> (raw)
In-Reply-To: <20081114213437.32354.66972.stgit@dwillia2-linux.ch.intel.com>

On Fri, 14 Nov 2008 14:34:37 -0700 Dan Williams <dan.j.williams@intel.com> wrote:

> Allowing multiple clients to each define their own channel allocation
> scheme quickly leads to a pathological situation.  For memory-to-memory
> offload all clients can share a central allocator.
> 
> This simply moves the existing async_tx allocator to dmaengine with
> minimal fixups:
> * async_tx.c:get_chan_ref_by_cap --> dmaengine.c:nth_chan
> * async_tx.c:async_tx_rebalance --> dmaengine.c:dma_channel_rebalance
> * split out common code from async_tx.c:__async_tx_find_channel -->
>   dma_find_channel
> 
>  /**
> + * dma_cap_mask_all - enable iteration over all operation types
> + */
> +static dma_cap_mask_t dma_cap_mask_all;
> +
> +/**
> + * dma_chan_tbl_ent - tracks channel allocations per core/operation
> + */

Would be conventional to document the fields as well.

> +struct dma_chan_tbl_ent {
> +	struct dma_chan *chan;
> +};
> +
> +/**
> + * channel_table - percpu lookup table for memory-to-memory offload providers
> + */
> +static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
> +
> +static int __init dma_channel_table_init(void)
> +{
> +	enum dma_transaction_type cap;
> +	int err = 0;
> +
> +	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
> +
> +	/* 'interrupt' and 'slave' are channel capabilities, but are not
> +	 * associated with an operation so they do not need an entry in the
> +	 * channel_table
> +	 */
> +	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
> +	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
> +
> +	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
> +		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
> +		if (!channel_table[cap]) {
> +			err = 1;

initcalls can return -ve errnos, and that at least would make the
message in do_one_initcall() more meaningful.

> +			break;
> +		}
> +	}
> +
> +	if (err) {
> +		pr_err("dmaengine: initialization failure\n");
> +		for_each_dma_cap_mask(cap, dma_cap_mask_all)
> +			if (channel_table[cap])
> +				free_percpu(channel_table[cap]);
> +	}
> +
> +	return err;
> +}
> +subsys_initcall(dma_channel_table_init);
> +
> +/**
> + * dma_find_channel - find a channel to carry out the operation
> + * @tx_type: transaction type
> + */
> +struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
> +{
> +	struct dma_chan *chan;
> +	int cpu;
> +
> +	WARN_ONCE(dmaengine_ref_count == 0,
> +		  "client called %s without a reference", __func__);
> +
> +	cpu = get_cpu();
> +	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
> +	put_cpu();
> +
> +	return chan;
> +}
> +EXPORT_SYMBOL(dma_find_channel);

Strange.  We return the address of a per-cpu variable, but we've
reenabled preemption so this thread can now switch CPUs.  Hence there
must be spinlocking on *chan as well?

> +/**
> + * nth_chan - returns the nth channel of the given capability
> + * @cap: capability to match
> + * @n: nth channel desired
> + *
> + * Defaults to returning the channel with the desired capability and the
> + * lowest reference count when 'n' cannot be satisfied
> + */
> +static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
> +{
> +	struct dma_device *device;
> +	struct dma_chan *chan;
> +	struct dma_chan *ret = NULL;
> +	struct dma_chan *min = NULL;
> +
> +	list_for_each_entry(device, &dma_device_list, global_node) {
> +		if (!dma_has_cap(cap, device->cap_mask))
> +			continue;
> +		list_for_each_entry(chan, &device->channels, device_node) {
> +			if (!chan->client_count)
> +				continue;
> +			if (!min)
> +				min = chan;
> +			else if (chan->table_count < min->table_count)
> +				min = chan;
> +
> +			if (n-- == 0) {
> +				ret = chan;
> +				break; /* done */
> +			}
> +		}
> +		if (ret)
> +			break; /* done */
> +	}
> +
> +	if (!ret)
> +		ret = min;
> +
> +	if (ret)
> +		ret->table_count++;

Undocumented locking for ->table_count.

> +	return ret;
> +}
> +
> +/**
> + * dma_channel_rebalance - redistribute the available channels
> + *
> + * Optimize for cpu isolation (each cpu gets a dedicated channel for an
> + * operation type) in the SMP case, and operation isolation (avoid
> + * multi-tasking channels) in the uniprocessor case.  Must be called
> + * under dma_list_mutex.
> + */
> +static void dma_channel_rebalance(void)
> +{
> +	struct dma_chan *chan;
> +	struct dma_device *device;
> +	int cpu;
> +	int cap;
> +	int n;
> +
> +	/* undo the last distribution */
> +	for_each_dma_cap_mask(cap, dma_cap_mask_all)
> +		for_each_possible_cpu(cpu)
> +			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

The number of possible cpus can be larger than the number of online
CPUs.  Is it worth making this code hotplug-aware?

> +	list_for_each_entry(device, &dma_device_list, global_node)
> +		list_for_each_entry(chan, &device->channels, device_node)
> +			chan->table_count = 0;
> +
> +	/* don't populate the channel_table if no clients are available */
> +	if (!dmaengine_ref_count)
> +		return;
> +
> +	/* redistribute available channels */
> +	n = 0;
> +	for_each_dma_cap_mask(cap, dma_cap_mask_all)
> +		for_each_online_cpu(cpu) {
> +			if (num_possible_cpus() > 1)
> +				chan = nth_chan(cap, n++);
> +			else
> +				chan = nth_chan(cap, -1);
> +
> +			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
> +		}
> +}
> +
>
> ...
>

  reply	other threads:[~2008-11-15  6:14 UTC|newest]

Thread overview: 52+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-11-14 21:34 [PATCH 00/13] dmaengine redux Dan Williams
2008-11-14 21:34 ` [PATCH 01/13] async_tx, dmaengine: document channel allocation and api rework Dan Williams
2008-11-14 21:34 ` [PATCH 02/13] dmaengine: remove dependency on async_tx Dan Williams
2008-11-15  6:02   ` Andrew Morton
2008-11-17 23:44     ` Dan Williams
2008-11-14 21:34 ` [PATCH 03/13] dmaengine: up-level reference counting to the module level Dan Williams
2008-11-15  6:08   ` Andrew Morton
2008-11-18  3:42     ` Dan Williams
2008-12-04 16:56   ` Guennadi Liakhovetski
2008-12-04 18:51     ` Dan Williams
2008-12-04 19:28       ` Guennadi Liakhovetski
2008-12-08 22:39         ` Dan Williams
2008-12-12 14:28   ` Sosnowski, Maciej
2008-12-15 22:12     ` Dan Williams
2008-12-18 14:26       ` Sosnowski, Maciej
2008-11-14 21:34 ` [PATCH 04/13] dmaengine: centralize channel allocation, introduce dma_find_channel Dan Williams
2008-11-15  6:14   ` Andrew Morton [this message]
2008-11-18  5:59     ` Dan Williams
2008-11-14 21:34 ` [PATCH 05/13] dmaengine: provide a common 'issue_pending_all' implementation Dan Williams
2008-11-14 21:34 ` [PATCH 06/13] net_dma: convert to dma_find_channel Dan Williams
2008-11-14 21:34 ` [PATCH 07/13] dmaengine: introduce dma_request_channel and private channels Dan Williams
2008-12-02 15:52   ` Guennadi Liakhovetski
2008-12-02 17:16     ` Dan Williams
2008-12-02 17:27       ` Guennadi Liakhovetski
2008-12-02 19:10         ` Dan Williams
2008-12-02 21:28           ` Guennadi Liakhovetski
2009-01-30 17:03       ` Atsushi Nemoto
2009-01-30 23:13         ` Dan Williams
2009-01-30 23:27           ` Guennadi Liakhovetski
2009-01-31 12:18             ` Atsushi Nemoto
2008-12-02 17:26     ` Nicolas Ferre
2008-12-12 14:29   ` Sosnowski, Maciej
2008-12-15 23:55     ` Dan Williams
2008-12-18 14:33       ` Sosnowski, Maciej
2008-12-18 17:27         ` Dan Williams
2009-02-06 16:58   ` Atsushi Nemoto
2008-11-14 21:34 ` [PATCH 08/13] dmatest: convert to dma_request_channel Dan Williams
2008-11-15  6:17   ` Andrew Morton
2008-11-18 18:24     ` Dan Williams
2008-11-18 20:58       ` Andrew Morton
2008-11-18 22:19         ` Dan Williams
2008-11-14 21:35 ` [PATCH 09/13] atmel-mci: convert to dma_request_channel and down-level dma_slave Dan Williams
2009-01-30 16:40   ` Atsushi Nemoto
2009-01-30 23:02     ` Dan Williams
2008-11-14 21:35 ` [PATCH 10/13] dmaengine: replace dma_async_client_register with dmaengine_get Dan Williams
2008-11-14 21:35 ` [PATCH 11/13] dmaengine: kill struct dma_client and supporting infrastructure Dan Williams
2008-12-12 14:29   ` Sosnowski, Maciej
2008-12-16  0:09     ` Dan Williams
2008-12-18 14:34       ` Sosnowski, Maciej
2008-11-14 21:35 ` [PATCH 12/13] dmaengine: remove 'bigref' infrastructure Dan Williams
2008-11-14 21:35 ` [PATCH 13/13] dmaengine: kill enum dma_state_client Dan Williams
2008-12-12 14:27 ` [PATCH 00/13] dmaengine redux Sosnowski, Maciej

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20081114221442.d16cdaa1.akpm@linux-foundation.org \
    --to=akpm@linux-foundation.org \
    --cc=dan.j.williams@intel.com \
    --cc=g.liakhovetski@gmx.de \
    --cc=hskinnemoen@atmel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=maciej.sosnowski@intel.com \
    --cc=netdev@vger.kernel.org \
    --cc=nicolas.ferre@atmel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).