From: Frank Li <Frank.li@nxp.com>
To: "Benoît Monin" <benoit.monin@bootlin.com>
Cc: Vinod Koul <vkoul@kernel.org>,
Thomas Petazzoni <thomas.petazzoni@bootlin.com>,
Frank Li <Frank.Li@kernel.org>,
imx@lists.linux.dev, dmaengine@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH RFC 1/2] dmaengine: fsl-edma: Implement device_prep_peripheral_dma_vec
Date: Mon, 4 May 2026 11:58:08 -0400 [thread overview]
Message-ID: <afjCEG_Do01eVBBO@lizhi-Precision-Tower-5810> (raw)
In-Reply-To: <20260430-fsl-edma-dyn-sg-v1-1-4e0ecbe2df66@bootlin.com>
On Thu, Apr 30, 2026 at 11:49:32AM +0200, Benoît Monin wrote:
> Add an implementation of the .device_prep_peripheral_dma_vec() callback to set
> up a scatter/gather DMA transfer from an array of dma_vec structures. Set up
> a cyclic transfer if the DMA_PREP_REPEAT flag is set.
>
> Signed-off-by: Benoît Monin <benoit.monin@bootlin.com>
> ---
Please remove the RFC tag from this patch.
Frank
> drivers/dma/fsl-edma-common.c | 110 ++++++++++++++++++++++++++++++++++++++++++
> drivers/dma/fsl-edma-common.h | 4 ++
> drivers/dma/fsl-edma-main.c | 2 +
> 3 files changed, 116 insertions(+)
>
> diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
> index bb7531c456df..26a5ecf493b9 100644
> --- a/drivers/dma/fsl-edma-common.c
> +++ b/drivers/dma/fsl-edma-common.c
> @@ -673,6 +673,116 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
> return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
> }
>
> +struct dma_async_tx_descriptor *fsl_edma_prep_peripheral_dma_vec(
> + struct dma_chan *chan, const struct dma_vec *vecs,
> + size_t nb, enum dma_transfer_direction direction,
> + unsigned long flags)
> +{
> + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
> + struct fsl_edma_desc *fsl_desc;
> + dma_addr_t src_addr, dst_addr, last_sg;
> + u16 soff, doff, iter;
> + u32 nbytes;
> + int i;
> +
> + if (!is_slave_direction(direction))
> + return NULL;
> +
> + if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
> + return NULL;
> +
> + fsl_desc = fsl_edma_alloc_desc(fsl_chan, nb);
> + if (!fsl_desc)
> + return NULL;
> + fsl_desc->iscyclic = flags & DMA_PREP_REPEAT;
> + fsl_desc->dirn = direction;
> +
> + if (direction == DMA_MEM_TO_DEV) {
> + if (!fsl_chan->cfg.src_addr_width)
> + fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
> + fsl_chan->attr =
> + fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
> + fsl_chan->cfg.dst_addr_width);
> + nbytes = fsl_chan->cfg.dst_addr_width *
> + fsl_chan->cfg.dst_maxburst;
> + } else {
> + if (!fsl_chan->cfg.dst_addr_width)
> + fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
> + fsl_chan->attr =
> + fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
> + fsl_chan->cfg.dst_addr_width);
> + nbytes = fsl_chan->cfg.src_addr_width *
> + fsl_chan->cfg.src_maxburst;
> + }
> +
> + for (i = 0; i < nb; i++) {
> + if (direction == DMA_MEM_TO_DEV) {
> + src_addr = vecs[i].addr;
> + dst_addr = fsl_chan->dma_dev_addr;
> + soff = fsl_chan->cfg.dst_addr_width;
> + doff = 0;
> + } else if (direction == DMA_DEV_TO_MEM) {
> + src_addr = fsl_chan->dma_dev_addr;
> + dst_addr = vecs[i].addr;
> + soff = 0;
> + doff = fsl_chan->cfg.src_addr_width;
> + } else {
> + /* DMA_DEV_TO_DEV */
> + src_addr = fsl_chan->cfg.src_addr;
> + dst_addr = fsl_chan->cfg.dst_addr;
> + soff = 0;
> + doff = 0;
> + }
> +
> +		/*
> +		 * Choose a suitable burst length if the dma_vec length is not a
> +		 * multiple of the burst length, so that the whole transfer length
> +		 * is a multiple of the minor loop (burst length).
> +		 */
> + if (vecs[i].len % nbytes) {
> + u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
> + u32 burst = (direction == DMA_DEV_TO_MEM) ?
> + fsl_chan->cfg.src_maxburst :
> + fsl_chan->cfg.dst_maxburst;
> + int j;
> +
> + for (j = burst; j > 1; j--) {
> + if (!(vecs[i].len % (j * width))) {
> + nbytes = j * width;
> + break;
> + }
> + }
> + /* Set burst size as 1 if there's no suitable one */
> + if (j == 1)
> + nbytes = width;
> + }
> + iter = vecs[i].len / nbytes;
> + if (i < nb - 1) {
> + last_sg = fsl_desc->tcd[(i + 1)].ptcd;
> + fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
> + dst_addr, fsl_chan->attr, soff,
> + nbytes, 0, iter, iter, doff, last_sg,
> + false, false, true);
> + } else {
> + if (fsl_desc->iscyclic) {
> + last_sg = fsl_desc->tcd[0].ptcd;
> + fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
> + dst_addr, fsl_chan->attr, soff,
> + nbytes, 0, iter, iter, doff, last_sg,
> + true, false, true);
> + } else {
> + last_sg = 0;
> + fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
> + dst_addr, fsl_chan->attr, soff,
> + nbytes, 0, iter, iter, doff, last_sg,
> + true, true, false);
> + }
> + }
> + }
> +
> + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
> +}
> +
> struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
> struct dma_chan *chan, struct scatterlist *sgl,
> unsigned int sg_len, enum dma_transfer_direction direction,
> diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
> index 205a96489094..0d028048701d 100644
> --- a/drivers/dma/fsl-edma-common.h
> +++ b/drivers/dma/fsl-edma-common.h
> @@ -496,6 +496,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
> struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
> size_t period_len, enum dma_transfer_direction direction,
> unsigned long flags);
> +struct dma_async_tx_descriptor *fsl_edma_prep_peripheral_dma_vec(
> + struct dma_chan *chan, const struct dma_vec *vecs,
> + size_t nb, enum dma_transfer_direction direction,
> + unsigned long flags);
> struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
> struct dma_chan *chan, struct scatterlist *sgl,
> unsigned int sg_len, enum dma_transfer_direction direction,
> diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
> index 36155ab1602a..6693b4270a1a 100644
> --- a/drivers/dma/fsl-edma-main.c
> +++ b/drivers/dma/fsl-edma-main.c
> @@ -841,6 +841,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
> fsl_edma->dma_dev.device_free_chan_resources
> = fsl_edma_free_chan_resources;
> fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
> + fsl_edma->dma_dev.device_prep_peripheral_dma_vec
> + = fsl_edma_prep_peripheral_dma_vec;
> fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
> fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
> fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
>
> --
> 2.54.0
>
next prev parent reply other threads:[~2026-05-04 15:58 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-30 9:49 [PATCH RFC 0/2] dmaengine: fsl-edma: Scatter/gather improvements Benoît Monin
2026-04-30 9:49 ` [PATCH RFC 1/2] dmaengine: fsl-edma: Implement device_prep_peripheral_dma_vec Benoît Monin
2026-05-04 15:58 ` Frank Li [this message]
2026-05-05 13:51 ` Benoît Monin
2026-04-30 9:49 ` [PATCH RFC 2/2] dmaengine: fsl-edma: Support dynamic scatter/gather chaining Benoît Monin
2026-05-04 16:04 ` Frank Li
2026-05-05 13:51 ` Benoît Monin
2026-05-05 15:07 ` Frank Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=afjCEG_Do01eVBBO@lizhi-Precision-Tower-5810 \
--to=frank.li@nxp.com \
--cc=Frank.Li@kernel.org \
--cc=benoit.monin@bootlin.com \
--cc=dmaengine@vger.kernel.org \
--cc=imx@lists.linux.dev \
--cc=linux-kernel@vger.kernel.org \
--cc=thomas.petazzoni@bootlin.com \
--cc=vkoul@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox