From: Jisheng Zhang <jszhang@kernel.org>
To: Vinod Koul <vkoul@kernel.org>, Rob Herring <robh@kernel.org>,
Krzysztof Kozlowski <krzk+dt@kernel.org>,
Conor Dooley <conor+dt@kernel.org>,
Robin Murphy <robin.murphy@arm.com>
Cc: dmaengine@vger.kernel.org, devicetree@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org
Subject: [PATCH 12/14] dmaengine: dma350: Support device_prep_dma_cyclic
Date: Sat, 23 Aug 2025 23:40:07 +0800
Message-ID: <20250823154009.25992-13-jszhang@kernel.org>
In-Reply-To: <20250823154009.25992-1-jszhang@kernel.org>

Add support for the device_prep_dma_cyclic() callback to benefit cyclic
DMA clients such as ALSA.
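
For reference, a client consumes this through the generic dmaengine
cyclic API. Below is a minimal sketch of such a client; the function and
parameter names (start_cyclic_tx, buf_dma, period_elapsed_cb, cb_arg)
are illustrative placeholders and not part of this patch or of ALSA. It
assumes the channel has already been requested and configured via
dmaengine_slave_config():

    #include <linux/dmaengine.h>

    /* Hypothetical client helper: start a mem-to-dev cyclic transfer. */
    static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf_dma,
                               size_t buf_len, size_t period_len,
                               dma_async_tx_callback period_elapsed_cb,
                               void *cb_arg)
    {
            struct dma_async_tx_descriptor *txd;

            txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len,
                                            period_len, DMA_MEM_TO_DEV,
                                            DMA_PREP_INTERRUPT);
            if (!txd)
                    return -ENOMEM;

            /* The callback fires once per completed period. */
            txd->callback = period_elapsed_cb;
            txd->callback_param = cb_arg;

            dmaengine_submit(txd);
            dma_async_issue_pending(chan);
            return 0;
    }

The transfer then runs until dmaengine_terminate_sync() (or similar) is
called, with the per-period callback driving the client's buffer
pointer, as ALSA's PCM engine does.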
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
---
drivers/dma/arm-dma350.c | 118 +++++++++++++++++++++++++++++++++++++--
1 file changed, 113 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/arm-dma350.c b/drivers/dma/arm-dma350.c
index a285778264b9..5abb965c6687 100644
--- a/drivers/dma/arm-dma350.c
+++ b/drivers/dma/arm-dma350.c
@@ -212,8 +212,10 @@ struct d350_chan {
enum dma_status status;
dma_cookie_t cookie;
u32 residue;
+ u32 periods;
u8 tsz;
u8 ch;
+ bool cyclic;
bool has_trig;
bool has_wrap;
bool coherent;
@@ -475,6 +477,105 @@ d350_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
+static struct dma_async_tx_descriptor *
+d350_prep_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct d350_chan *dch = to_d350_chan(chan);
+ u32 periods, trig, *cmd, tsz;
+ dma_addr_t src, dst, phys;
+ struct d350_desc *desc;
+ struct d350_sg *dsg;
+ int i, j;
+
+ if (unlikely(!is_slave_direction(dir) || !buf_len || !period_len))
+ return NULL;
+
+ periods = buf_len / period_len;
+
+ desc = kzalloc(struct_size(desc, sg, periods), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ dch->cyclic = true;
+ desc->sglen = periods;
+
+ if (dir == DMA_MEM_TO_DEV)
+ tsz = __ffs(dch->config.dst_addr_width | (1 << dch->tsz));
+ else
+ tsz = __ffs(dch->config.src_addr_width | (1 << dch->tsz));
+
+ for (i = 0; i < periods; i++) {
+ desc->sg[i].command = dma_pool_zalloc(dch->cmd_pool, GFP_NOWAIT, &phys);
+ if (unlikely(!desc->sg[i].command))
+ goto err_cmd_alloc;
+
+ desc->sg[i].phys = phys;
+ dsg = &desc->sg[i];
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf_addr + i * period_len;
+ dst = dch->config.dst_addr;
+ trig = CH_CTRL_USEDESTRIGIN;
+ } else {
+ src = dch->config.src_addr;
+ dst = buf_addr + i * period_len;
+ trig = CH_CTRL_USESRCTRIGIN;
+ }
+ dsg->tsz = tsz;
+ dsg->xsize = lower_16_bits(period_len >> dsg->tsz);
+ dsg->xsizehi = upper_16_bits(period_len >> dsg->tsz);
+
+ cmd = dsg->command;
+ cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
+ LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
+ LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;
+
+ cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, dsg->tsz) |
+ FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
+ FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD) | trig;
+
+ cmd[2] = lower_32_bits(src);
+ cmd[3] = upper_32_bits(src);
+ cmd[4] = lower_32_bits(dst);
+ cmd[5] = upper_32_bits(dst);
+ cmd[6] = FIELD_PREP(CH_XY_SRC, dsg->xsize) | FIELD_PREP(CH_XY_DES, dsg->xsize);
+ cmd[7] = FIELD_PREP(CH_XY_SRC, dsg->xsizehi) | FIELD_PREP(CH_XY_DES, dsg->xsizehi);
+ if (dir == DMA_MEM_TO_DEV) {
+ cmd[0] |= LINK_DESTRIGINCFG;
+ cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[9] = TRANSCFG_DEVICE;
+ cmd[10] = FIELD_PREP(CH_XY_SRC, 1);
+ cmd[11] = FIELD_PREP(CH_DESTRIGINMODE, CH_DESTRIG_DMA_FC) |
+ FIELD_PREP(CH_DESTRIGINTYPE, CH_DESTRIG_HW_REQ);
+ } else {
+ cmd[0] |= LINK_SRCTRIGINCFG;
+ cmd[8] = TRANSCFG_DEVICE;
+ cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
+ cmd[10] = FIELD_PREP(CH_XY_DES, 1);
+ cmd[11] = FIELD_PREP(CH_SRCTRIGINMODE, CH_SRCTRIG_DMA_FC) |
+ FIELD_PREP(CH_SRCTRIGINTYPE, CH_SRCTRIG_HW_REQ);
+ }
+
+ if (i)
+ desc->sg[i - 1].command[12] = phys | CH_LINKADDR_EN;
+ }
+
+ /* cyclic list */
+ desc->sg[periods - 1].command[12] = desc->sg[0].phys | CH_LINKADDR_EN;
+
+ mb();
+
+ return vchan_tx_prep(&dch->vc, &desc->vd, flags);
+
+err_cmd_alloc:
+ for (j = 0; j < i; j++)
+ dma_pool_free(dch->cmd_pool, desc->sg[j].command, desc->sg[j].phys);
+ kfree(desc);
+ return NULL;
+}
+
static int d350_slave_config(struct dma_chan *chan, struct dma_slave_config *config)
{
struct d350_chan *dch = to_d350_chan(chan);
@@ -565,6 +666,7 @@ static int d350_terminate_all(struct dma_chan *chan)
}
vchan_get_all_descriptors(&dch->vc, &list);
list_splice_tail(&list, &dch->vc.desc_terminated);
+ dch->cyclic = false;
spin_unlock_irqrestore(&dch->vc.lock, flags);
return 0;
@@ -716,11 +818,15 @@ static irqreturn_t d350_irq(int irq, void *data)
spin_lock(&dch->vc.lock);
if (ch_status & CH_STAT_INTR_DONE) {
- vchan_cookie_complete(vd);
- dch->desc = NULL;
- dch->status = DMA_COMPLETE;
- dch->residue = 0;
- d350_start_next(dch);
+ if (dch->cyclic) {
+ vchan_cyclic_callback(vd);
+ } else {
+ vchan_cookie_complete(vd);
+ dch->desc = NULL;
+ dch->status = DMA_COMPLETE;
+ dch->residue = 0;
+ d350_start_next(dch);
+ }
} else {
dch->status = DMA_ERROR;
dch->residue = vd->tx_result.residue;
@@ -886,8 +992,10 @@ static int d350_probe(struct platform_device *pdev)
if (trig_bits) {
dmac->dma.directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
dma_cap_set(DMA_SLAVE, dmac->dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, dmac->dma.cap_mask);
dmac->dma.device_config = d350_slave_config;
dmac->dma.device_prep_slave_sg = d350_prep_slave_sg;
+ dmac->dma.device_prep_dma_cyclic = d350_prep_cyclic;
}
if (memset) {
--
2.50.0