From: Carlos Song <carlos.song@nxp.com>
To: broonie@kernel.org, frank.li@nxp.com, shawnguo@kernel.org,
s.hauer@pengutronix.de, kernel@pengutronix.de,
festevam@gmail.com
Cc: linux-spi@vger.kernel.org, imx@lists.linux.dev,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, Carlos Song <carlos.song@nxp.com>
Subject: [PATCH 2/6] spi: imx: introduce helper to clear DMA mode logic
Date: Tue, 25 Nov 2025 18:06:14 +0800 [thread overview]
Message-ID: <20251125100618.2159770-3-carlos.song@nxp.com> (raw)
In-Reply-To: <20251125100618.2159770-1-carlos.song@nxp.com>
Add helper functions for the DMA transfer path: one to find the maximum
usable watermark length and one to submit the DMA descriptors and wait for
completion. This refactoring makes the code more concise and improves
readability.
No functional changes.
Signed-off-by: Carlos Song <carlos.song@nxp.com>
---
drivers/spi/spi-imx.c | 164 +++++++++++++++++++++++-------------------
1 file changed, 92 insertions(+), 72 deletions(-)
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e78e02a84b50..012f5bcbf73f 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1437,6 +1437,94 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
return secs_to_jiffies(2 * timeout);
}
+static int spi_imx_dma_submit(struct spi_imx_data *spi_imx,
+ struct spi_transfer *transfer)
+{
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+ struct spi_controller *controller = spi_imx->controller;
+ struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+ unsigned long transfer_timeout;
+ unsigned long time_left;
+
+ /*
+ * The TX DMA setup starts the transfer, so make sure RX is configured
+ * before TX.
+ */
+ desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ transfer->error |= SPI_TRANS_FAIL_NO_START;
+ return -EINVAL;
+ }
+
+ desc_rx->callback = spi_imx_dma_rx_callback;
+ desc_rx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_rx);
+ reinit_completion(&spi_imx->dma_rx_completion);
+ dma_async_issue_pending(controller->dma_rx);
+
+ desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ return -EINVAL;
+ }
+
+ desc_tx->callback = spi_imx_dma_tx_callback;
+ desc_tx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_tx);
+ reinit_completion(&spi_imx->dma_tx_completion);
+ dma_async_issue_pending(controller->dma_tx);
+
+ spi_imx->devtype_data->trigger(spi_imx);
+
+ transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
+
+ /* Wait SDMA to finish the data transfer.*/
+ time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+ transfer_timeout);
+ if (!time_left) {
+ dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
+ dmaengine_terminate_all(controller->dma_tx);
+ dmaengine_terminate_all(controller->dma_rx);
+ return -ETIMEDOUT;
+ }
+
+ time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
+ transfer_timeout);
+ if (!time_left) {
+ dev_err(&controller->dev, "I/O Error in DMA RX\n");
+ spi_imx->devtype_data->reset(spi_imx);
+ dmaengine_terminate_all(controller->dma_rx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void spi_imx_dma_max_wml_find(struct spi_imx_data *spi_imx,
+ struct spi_transfer *transfer)
+{
+ struct sg_table *rx = &transfer->rx_sg;
+ struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
+ unsigned int bytes_per_word, i;
+
+ /* Get the right burst length from the last sg to ensure no tail data */
+ bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
+ for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
+ if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
+ break;
+ }
+ /* Use 1 as wml in case no available burst length got */
+ if (i == 0)
+ i = 1;
+
+ spi_imx->wml = i;
+}
+
static int spi_imx_dma_configure(struct spi_controller *controller)
{
int ret;
@@ -1484,26 +1572,10 @@ static int spi_imx_dma_configure(struct spi_controller *controller)
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer)
{
- struct dma_async_tx_descriptor *desc_tx, *desc_rx;
- unsigned long transfer_timeout;
- unsigned long time_left;
struct spi_controller *controller = spi_imx->controller;
- struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
- struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
- unsigned int bytes_per_word, i;
int ret;
- /* Get the right burst length from the last sg to ensure no tail data */
- bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
- for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
- if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
- break;
- }
- /* Use 1 as wml in case no available burst length got */
- if (i == 0)
- i = 1;
-
- spi_imx->wml = i;
+ spi_imx_dma_max_wml_find(spi_imx, transfer);
ret = spi_imx_dma_configure(controller);
if (ret)
@@ -1516,61 +1588,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
}
spi_imx->devtype_data->setup_wml(spi_imx);
- /*
- * The TX DMA setup starts the transfer, so make sure RX is configured
- * before TX.
- */
- desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
- rx->sgl, rx->nents, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_rx) {
- ret = -EINVAL;
- goto dma_failure_no_start;
- }
-
- desc_rx->callback = spi_imx_dma_rx_callback;
- desc_rx->callback_param = (void *)spi_imx;
- dmaengine_submit(desc_rx);
- reinit_completion(&spi_imx->dma_rx_completion);
- dma_async_issue_pending(controller->dma_rx);
-
- desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
- tx->sgl, tx->nents, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_tx) {
- dmaengine_terminate_all(controller->dma_tx);
- dmaengine_terminate_all(controller->dma_rx);
- return -EINVAL;
- }
-
- desc_tx->callback = spi_imx_dma_tx_callback;
- desc_tx->callback_param = (void *)spi_imx;
- dmaengine_submit(desc_tx);
- reinit_completion(&spi_imx->dma_tx_completion);
- dma_async_issue_pending(controller->dma_tx);
-
- spi_imx->devtype_data->trigger(spi_imx);
-
- transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
-
- /* Wait SDMA to finish the data transfer.*/
- time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
- transfer_timeout);
- if (!time_left) {
- dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
- dmaengine_terminate_all(controller->dma_tx);
- dmaengine_terminate_all(controller->dma_rx);
- return -ETIMEDOUT;
- }
-
- time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
- transfer_timeout);
- if (!time_left) {
- dev_err(&controller->dev, "I/O Error in DMA RX\n");
- spi_imx->devtype_data->reset(spi_imx);
- dmaengine_terminate_all(controller->dma_rx);
- return -ETIMEDOUT;
- }
+ ret = spi_imx_dma_submit(spi_imx, transfer);
+ if (ret)
+ return ret;
return 0;
/* fallback to pio */
--
2.34.1
next prev parent reply other threads:[~2025-11-25 10:07 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-25 10:06 [PATCH 0/6] Support ECSPI dynamic burst feature for DMA mode Carlos Song
2025-11-25 10:06 ` [PATCH 1/6] spi: imx: group spi_imx_dma_configure() with spi_imx_dma_transfer() Carlos Song
2025-11-25 15:32 ` Frank Li
2025-11-25 10:06 ` Carlos Song [this message]
2025-11-25 15:41 ` [PATCH 2/6] spi: imx: introduce helper to clear DMA mode logic Frank Li
2025-11-25 10:06 ` [PATCH 3/6] spi: imx: avoid dmaengine_terminate_all() on TX prep failure Carlos Song
2025-11-25 15:42 ` Frank Li
2025-11-25 10:06 ` [PATCH 4/6] spi: imx: handle DMA submission errors with dma_submit_error() Carlos Song
2025-11-25 15:45 ` Frank Li
2025-11-25 10:06 ` [PATCH 5/6] spi: imx: support dynamic burst length for ECSPI DMA mode Carlos Song
2025-11-25 12:10 ` Marc Kleine-Budde
2025-11-26 7:42 ` Carlos Song
2025-11-26 8:31 ` Marc Kleine-Budde
2025-11-26 9:51 ` [EXT] " Carlos Song
2025-11-26 11:17 ` Carlos Song
2025-11-26 12:36 ` Marc Kleine-Budde
2025-11-27 2:36 ` [EXT] " Carlos Song
2025-11-26 8:11 ` Marc Kleine-Budde
2025-11-26 8:18 ` [EXT] " Carlos Song
2025-11-26 8:20 ` Marc Kleine-Budde
2025-11-26 8:34 ` [EXT] " Carlos Song
2025-11-26 8:44 ` Marc Kleine-Budde
2025-11-26 8:27 ` Marc Kleine-Budde
2025-11-26 8:43 ` [EXT] " Carlos Song
2025-11-26 12:22 ` Marc Kleine-Budde
2025-11-26 12:29 ` [EXT] " Carlos Song
2025-11-26 12:52 ` Marc Kleine-Budde
2025-11-27 2:58 ` Carlos Song
2025-11-25 10:06 ` [PATCH 6/6] spi: imx: enable DMA mode for target operation Carlos Song
2025-11-25 16:05 ` Frank Li
2025-11-26 2:11 ` Carlos Song
2025-12-02 7:00 ` Carlos Song
2025-11-26 12:18 ` Marc Kleine-Budde
2025-11-26 12:30 ` [EXT] " Carlos Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251125100618.2159770-3-carlos.song@nxp.com \
--to=carlos.song@nxp.com \
--cc=broonie@kernel.org \
--cc=festevam@gmail.com \
--cc=frank.li@nxp.com \
--cc=shawnguo@kernel.org \
--cc=imx@lists.linux.dev \
--cc=kernel@pengutronix.de \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-spi@vger.kernel.org \
--cc=s.hauer@pengutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox