Linux kernel and device drivers for NXP i.MX platforms
 help / color / mirror / Atom feed
From: Frank Li <Frank.Li@nxp.com>
To: "Vinod Koul" <vkoul@kernel.org>,
	"Manivannan Sadhasivam" <mani@kernel.org>,
	"Krzysztof Wilczyński" <kwilczynski@kernel.org>,
	"Kishon Vijay Abraham I" <kishon@kernel.org>,
	"Bjorn Helgaas" <bhelgaas@google.com>,
	"Christoph Hellwig" <hch@lst.de>,
	"Sagi Grimberg" <sagi@grimberg.me>,
	"Chaitanya Kulkarni" <kch@nvidia.com>,
	"Herbert Xu" <herbert@gondor.apana.org.au>,
	"David S. Miller" <davem@davemloft.net>,
	"Nicolas Ferre" <nicolas.ferre@microchip.com>,
	"Alexandre Belloni" <alexandre.belloni@bootlin.com>,
	"Claudiu Beznea" <claudiu.beznea@tuxon.dev>,
	"Koichiro Den" <den@valinux.co.jp>,
	"Niklas Cassel" <cassel@kernel.org>
Cc: dmaengine@vger.kernel.org, linux-kernel@vger.kernel.org,
	 linux-pci@vger.kernel.org, linux-nvme@lists.infradead.org,
	 mhi@lists.linux.dev, linux-arm-msm@vger.kernel.org,
	 linux-crypto@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,  imx@lists.linux.dev,
	Frank Li <Frank.Li@nxp.com>
Subject: [PATCH 1/8] dmaengine: Add API to combine configuration and preparation (sg and single)
Date: Mon, 08 Dec 2025 12:09:40 -0500	[thread overview]
Message-ID: <20251208-dma_prep_config-v1-1-53490c5e1e2a@nxp.com> (raw)
In-Reply-To: <20251208-dma_prep_config-v1-0-53490c5e1e2a@nxp.com>

Previously, configuration and preparation required two separate calls. This
works well when configuration is done only once during initialization.

However, in cases where the burst length or source/destination address must
be adjusted for each transfer, calling two functions is verbose and
requires additional locking to ensure both steps complete atomically.

Add a new API and callback, device_prep_slave_sg_config, that combines
configuration and preparation into a single operation. If the configuration
argument is NULL, or the driver does not implement the new callback, fall
back to the existing two-step implementation.

Signed-off-by: Frank Li <Frank.Li@nxp.com>
---
 include/linux/dmaengine.h | 64 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 57 insertions(+), 7 deletions(-)

diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 99efe2b9b4ea9844ca6161208362ef18ef111d96..6c563549133a28e26f1bdc367372b1e4a748afcf 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -835,6 +835,8 @@ struct dma_filter {
  *	where the address and size of each segment is located in one entry of
  *	the dma_vec array.
  * @device_prep_slave_sg: prepares a slave dma operation
+ *	(Deprecated, use @device_prep_slave_sg_config)
+ * @device_prep_slave_sg_config: prepares a slave dma operation, applying the
+ *	given channel configuration together with descriptor preparation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *	The function takes a buffer of size buf_len. The callback function will
  *	be called after period_len bytes have been transferred.
@@ -934,6 +936,11 @@ struct dma_device {
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context);
+	struct dma_async_tx_descriptor *(*device_prep_slave_sg_config)(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, struct dma_slave_config *config,
+		void *context);
 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
@@ -974,22 +981,44 @@ static inline bool is_slave_direction(enum dma_transfer_direction direction)
 	       (direction == DMA_DEV_TO_DEV);
 }
 
-static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
-	struct dma_chan *chan, dma_addr_t buf, size_t len,
-	enum dma_transfer_direction dir, unsigned long flags)
+static inline struct dma_async_tx_descriptor *
+dmaengine_prep_slave_single_config(struct dma_chan *chan, dma_addr_t buf,
+				   size_t len, enum dma_transfer_direction dir,
+				   unsigned long flags,
+				   struct dma_slave_config *config)
 {
 	struct scatterlist sg;
 	sg_init_table(&sg, 1);
 	sg_dma_address(&sg) = buf;
 	sg_dma_len(&sg) = len;
 
-	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+	if (!chan || !chan->device)
+		return NULL;
+
+	if (chan->device->device_prep_slave_sg_config)
+		return chan->device->device_prep_slave_sg_config(chan, &sg, 1,
+					dir, flags, config, NULL);
+
+	if (config)
+		if (dmaengine_slave_config(chan, config))
+			return NULL;
+
+	if (!chan->device->device_prep_slave_sg)
 		return NULL;
 
 	return chan->device->device_prep_slave_sg(chan, &sg, 1,
 						  dir, flags, NULL);
 }
 
+static inline struct dma_async_tx_descriptor *
+dmaengine_prep_slave_single(struct dma_chan *chan, dma_addr_t buf, size_t len,
+			    enum dma_transfer_direction dir,
+			    unsigned long flags)
+{
+	return dmaengine_prep_slave_single_config(chan, buf, len, dir,
+						  flags, NULL);
+}
+
 /**
  * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
  * @chan: The channel to be used for this descriptor
@@ -1009,17 +1038,38 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
 							    dir, flags);
 }
 
-static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg_config(
 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
-	enum dma_transfer_direction dir, unsigned long flags)
+	enum dma_transfer_direction dir, unsigned long flags,
+	struct dma_slave_config *config)
 {
-	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+	if (!chan || !chan->device)
+		return NULL;
+
+	if (chan->device->device_prep_slave_sg_config)
+		return chan->device->device_prep_slave_sg_config(chan, sgl,
+								 sg_len, dir,
+								 flags, config,
+								 NULL);
+	if (config)
+		if (dmaengine_slave_config(chan, config))
+			return NULL;
+
+	if (!chan->device->device_prep_slave_sg)
 		return NULL;
 
 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 						  dir, flags, NULL);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_transfer_direction dir, unsigned long flags)
+{
+	return dmaengine_prep_slave_sg_config(chan, sgl, sg_len, dir,
+					      flags, NULL);
+}
+
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 struct rio_dma_ext;
 static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(

-- 
2.34.1


  reply	other threads:[~2025-12-08 17:10 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-08 17:09 [PATCH 0/8] dmaengine: Add new API to combine onfiguration and descriptor preparation Frank Li
2025-12-08 17:09 ` Frank Li [this message]
2025-12-09  6:38   ` [PATCH 1/8] dmaengine: Add API to combine configuration and preparation (sg and single) Christoph Hellwig
2025-12-09 15:47     ` Frank Li
2025-12-10 22:55   ` Bjorn Helgaas
2025-12-08 17:09 ` [PATCH 2/8] PCI: endpoint: pci-epf-test: use new DMA API to simple code Frank Li
2025-12-10 22:50   ` Bjorn Helgaas
2025-12-08 17:09 ` [PATCH 3/8] dmaengine: dw-edma: Use new .device_prep_slave_sg_config() callback Frank Li
2025-12-08 17:09 ` [PATCH 4/8] dmaengine: dw-edma: Pass dma_slave_config to dw_edma_device_transfer() Frank Li
2025-12-08 17:09 ` [PATCH 5/8] nvmet: pci-epf: Remove unnecessary dmaengine_terminate_sync() on each DMA transfer Frank Li
2025-12-09  6:52   ` Damien Le Moal
2025-12-09 15:44     ` Frank Li
2025-12-08 17:09 ` [PATCH 6/8] nvmet: pci-epf: Use dmaengine_prep_slave_single_config() API Frank Li
2025-12-09  6:38   ` Christoph Hellwig
2025-12-10 19:05   ` kernel test robot
2025-12-10 22:54   ` Bjorn Helgaas
2025-12-08 17:09 ` [PATCH 7/8] PCI: epf-mhi:Using new API dmaengine_prep_slave_single_config() to simple code Frank Li
2025-12-10 22:53   ` Bjorn Helgaas
2025-12-08 17:09 ` [PATCH 8/8] crypto: atmel: Use dmaengine_prep_slave_single_config() API Frank Li
2025-12-10 23:38   ` kernel test robot
2025-12-09  7:20 ` [PATCH 0/8] dmaengine: Add new API to combine onfiguration and descriptor preparation Niklas Cassel
2025-12-16 12:45 ` Vinod Koul
2025-12-16 15:10   ` Frank Li
2025-12-16 15:43     ` Vinod Koul
2025-12-16 15:55       ` Frank Li
2025-12-16 17:17         ` Vinod Koul
2025-12-16 17:28           ` Frank Li
2025-12-17  5:10             ` Vinod Koul

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251208-dma_prep_config-v1-1-53490c5e1e2a@nxp.com \
    --to=frank.li@nxp.com \
    --cc=alexandre.belloni@bootlin.com \
    --cc=bhelgaas@google.com \
    --cc=cassel@kernel.org \
    --cc=claudiu.beznea@tuxon.dev \
    --cc=davem@davemloft.net \
    --cc=den@valinux.co.jp \
    --cc=dmaengine@vger.kernel.org \
    --cc=hch@lst.de \
    --cc=herbert@gondor.apana.org.au \
    --cc=imx@lists.linux.dev \
    --cc=kch@nvidia.com \
    --cc=kishon@kernel.org \
    --cc=kwilczynski@kernel.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-arm-msm@vger.kernel.org \
    --cc=linux-crypto@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=linux-pci@vger.kernel.org \
    --cc=mani@kernel.org \
    --cc=mhi@lists.linux.dev \
    --cc=nicolas.ferre@microchip.com \
    --cc=sagi@grimberg.me \
    --cc=vkoul@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox