linux-spi.vger.kernel.org archive mirror
* [PATCH 1/3] spi: imx: Fix DMA transfer
@ 2015-03-01 14:15 Anton Bondarenko
       [not found] ` <1425219333-3014-1-git-send-email-anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
  0 siblings, 1 reply; 4+ messages in thread
From: Anton Bondarenko @ 2015-03-01 14:15 UTC (permalink / raw)
  To: broonie-DgEjT+Ai2ygdnm+yROfE0A
  Cc: linux-spi-u79uwXL29TY76Z2rM5mHXA, Frank.Li-KZfg59tc24xl57MIdRCFDg,
	b38343-KZfg59tc24xl57MIdRCFDg

RX DMA tail data handling does not work correctly in many cases with the current
implementation, because the SPI core is set up to generate both the RX watermark
level and RX DATA TAIL events incorrectly. SPI transfer triggering for DMA is
also done incorrectly.

For example, suppose an SPI client wants to transfer 70 words. The old DMA
implementation set RX DATA TAIL to 6 words, so an RX DMA event is generated
once 6 words can be read from the RX FIFO. Garbage can then be read out of the
RX FIFO, because the SPI hardware has not yet received all of the words
required to trigger the RX watermark event.
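
As a rough sketch of the arithmetic in this example (illustrative only; a
64-entry RX FIFO is assumed here, the actual depth is SoC dependent):

	/* Old behaviour, assuming a 64-entry RX FIFO: */
	unsigned int len  = 70;         /* words requested by the client       */
	unsigned int wml  = 64 / 2;     /* RX watermark / DMA burst = 32 words */
	unsigned int tail = len % wml;  /* RX DATA TAIL programmed to 6 words  */
	/*
	 * The DATA TAIL event fires once 6 words sit in the RX FIFO, but the
	 * RX DMA burst still fetches a full watermark's worth of entries, so
	 * garbage is read out before the hardware has received enough words
	 * to raise the watermark event.
	 */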

The new implementation changes the handling of the RX data tail. DMA is used to
process all TX data, but only full chunks of RX data whose size is a multiple
of FIFO/2. The driver waits until both the TX and RX DMA transactions are done
and all TX data has been pushed out. At that moment only the RX data tail
remains in the RX FIFO, and it is read out using PIO.
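
A minimal sketch of the resulting DMA/PIO split (not part of the patch itself;
the names mirror those used in the code below):

	unsigned int wml     = spi_imx_get_fifosize(spi_imx) / 2; /* DMA burst granularity   */
	unsigned int left    = transfer->len % wml;                /* RX tail, drained by PIO */
	unsigned int dma_len = transfer->len - left;               /* handled entirely by DMA */
	/* After both DMA completions, the 'left' remaining words are read from
	 * the RX FIFO by PIO once the transfer-complete interrupt fires. */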

Transfer triggering is also changed to avoid RX data loss.

Signed-off-by: Anton Bondarenko <anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
---
 drivers/spi/spi-imx.c | 100 +++++++++++++++++++++++++++++++-------------------
 1 file changed, 62 insertions(+), 38 deletions(-)

diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6fea4af..9df96c8 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -53,6 +53,7 @@
 /* generic defines to abstract from the different register layouts */
 #define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
 #define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
+#define MXC_INT_TCEN	BIT(7)   /* Transfer complete */
 
 /* The maximum  bytes that a sdma BD can transfer.*/
 #define MAX_SDMA_BD_BYTES  (1 << 15)
@@ -104,9 +105,7 @@ struct spi_imx_data {
 	unsigned int dma_is_inited;
 	unsigned int dma_finished;
 	bool usedma;
-	u32 rx_wml;
-	u32 tx_wml;
-	u32 rxt_wml;
+	u32 wml;
 	struct completion dma_rx_completion;
 	struct completion dma_tx_completion;
 
@@ -201,8 +200,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
-	    && (transfer->len > spi_imx->tx_wml))
+	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->wml))
 		return true;
 	return false;
 }
@@ -227,6 +225,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 #define MX51_ECSPI_INT		0x10
 #define MX51_ECSPI_INT_TEEN		(1 <<  0)
 #define MX51_ECSPI_INT_RREN		(1 <<  3)
+#define MX51_ECSPI_INT_TCEN		BIT(7)
 
 #define MX51_ECSPI_DMA      0x14
 #define MX51_ECSPI_DMA_TX_WML_OFFSET	0
@@ -291,6 +290,9 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
 	if (enable & MXC_INT_RR)
 		val |= MX51_ECSPI_INT_RREN;
 
+	if (enable & MXC_INT_TCEN)
+		val |= MX51_ECSPI_INT_TCEN;
+
 	writel(val, spi_imx->base + MX51_ECSPI_INT);
 }
 
@@ -310,8 +312,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
 static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 		struct spi_imx_config *config)
 {
-	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
-	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
+	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, dma = 0;
+	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
+
 	u32 clk = config->speed_hz, delay;
 
 	/*
@@ -368,21 +371,9 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 	 * and enable DMA request.
 	 */
 	if (spi_imx->dma_is_inited) {
-		dma = readl(spi_imx->base + MX51_ECSPI_DMA);
-
-		spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
-		spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
-		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
-		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
-		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
-		rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
-		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
-			   & ~MX51_ECSPI_DMA_RX_WML_MASK
-			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
-			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
-			   |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
-			   |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
-			   |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
+		dma =   (spi_imx->wml - 1) << MX51_ECSPI_DMA_RX_WML_OFFSET
+			| (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
+			| (1 << MX51_ECSPI_DMA_RXDEN_OFFSET);
 
 		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
 	}
@@ -826,6 +817,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 	if (of_machine_is_compatible("fsl,imx6dl"))
 		return 0;
 
+	spi_imx->wml = spi_imx_get_fifosize(spi_imx) / 2;
+
 	/* Prepare for TX DMA: */
 	master->dma_tx = dma_request_slave_channel(dev, "tx");
 	if (!master->dma_tx) {
@@ -837,7 +830,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 	slave_config.direction = DMA_MEM_TO_DEV;
 	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
 	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx)
+					- spi_imx->wml;
 	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
 	if (ret) {
 		dev_err(dev, "error in TX dma configuration.\n");
@@ -855,7 +849,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 	slave_config.direction = DMA_DEV_TO_MEM;
 	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
 	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx)
+					- spi_imx->wml;
 	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
 	if (ret) {
 		dev_err(dev, "error in RX dma configuration.\n");
@@ -896,8 +891,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 	int ret;
 	unsigned long timeout;
-	u32 dma;
-	int left;
+	const int left = transfer->len % spi_imx->wml;
 	struct spi_master *master = spi_imx->bitbang.master;
 	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
 
@@ -914,9 +908,23 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	}
 
 	if (rx) {
+		/* Cut RX data tail */
+		const unsigned int old_nents = rx->nents;
+
+		WARN_ON(sg_dma_len(&rx->sgl[rx->nents - 1]) < left);
+		sg_dma_len(&rx->sgl[rx->nents - 1]) -= left;
+		if (sg_dma_len(&rx->sgl[rx->nents - 1]) == 0)
+			--rx->nents;
+
 		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
 					rx->sgl, rx->nents, DMA_FROM_DEVICE,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+		/* Restore old SG table state */
+		if (old_nents > rx->nents)
+			++rx->nents;
+		sg_dma_len(&rx->sgl[rx->nents - 1]) += left;
+
 		if (!desc_rx)
 			goto no_dma;
 
@@ -931,17 +939,10 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	/* Trigger the cspi module. */
 	spi_imx->dma_finished = 0;
 
-	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
-	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
-	/* Change RX_DMA_LENGTH trigger dma fetch tail data */
-	left = transfer->len % spi_imx->rxt_wml;
-	if (left)
-		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
-				spi_imx->base + MX51_ECSPI_DMA);
+	dma_async_issue_pending(master->dma_rx);
+	dma_async_issue_pending(master->dma_tx);
 	spi_imx->devtype_data->trigger(spi_imx);
 
-	dma_async_issue_pending(master->dma_tx);
-	dma_async_issue_pending(master->dma_rx);
 	/* Wait SDMA to finish the data transfer.*/
 	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
 						IMX_DMA_TIMEOUT);
@@ -950,6 +951,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 			dev_driver_string(&master->dev),
 			dev_name(&master->dev));
 		dmaengine_terminate_all(master->dma_tx);
+		dmaengine_terminate_all(master->dma_rx);
 	} else {
 		timeout = wait_for_completion_timeout(
 				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
@@ -959,10 +961,28 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 				dev_name(&master->dev));
 			spi_imx->devtype_data->reset(spi_imx);
 			dmaengine_terminate_all(master->dma_rx);
+		} else if (left) {
+			dma_sync_sg_for_cpu(master->dma_rx->device->dev,
+					    rx->sgl, rx->nents,
+					    DMA_FROM_DEVICE);
+
+			spi_imx->rx_buf = transfer->rx_buf
+						+ (transfer->len - left);
+			spi_imx->txfifo = left;
+			reinit_completion(&spi_imx->xfer_done);
+
+			spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);
+
+			timeout = wait_for_completion_timeout(
+					&spi_imx->xfer_done, IMX_DMA_TIMEOUT);
+			if (!timeout) {
+				pr_warn("%s %s: I/O Error in RX tail\n",
+					dev_driver_string(&master->dev),
+					dev_name(&master->dev));
+			}
 		}
-		writel(dma |
-		       spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
-		       spi_imx->base + MX51_ECSPI_DMA);
+
+		writel(0, spi_imx->base + MX51_ECSPI_DMA);
 	}
 
 	spi_imx->dma_finished = 1;
@@ -1009,6 +1029,10 @@ static int spi_imx_transfer(struct spi_device *spi,
 	int ret;
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 
+	/* flush rxfifo before transfer */
+	while (spi_imx->devtype_data->rx_available(spi_imx))
+		spi_imx->rx(spi_imx);
+
 	if (spi_imx->bitbang.master->can_dma &&
 	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
 		spi_imx->usedma = true;
-- 
2.3.0


* [PATCH 2/3] spi: imx: replace fixed timeout with calculated one
       [not found] ` <1425219333-3014-1-git-send-email-anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
@ 2015-03-01 14:15   ` Anton Bondarenko
  2015-03-01 14:15   ` [PATCH 3/3] spi: imx: add support for all SPI word width for DMA transfer Anton Bondarenko
  2015-03-09 10:31   ` [PATCH 1/3] spi: imx: Fix " Robin Gong
  2 siblings, 0 replies; 4+ messages in thread
From: Anton Bondarenko @ 2015-03-01 14:15 UTC (permalink / raw)
  To: broonie-DgEjT+Ai2ygdnm+yROfE0A
  Cc: linux-spi-u79uwXL29TY76Z2rM5mHXA, Frank.Li-KZfg59tc24xl57MIdRCFDg,
	b38343-KZfg59tc24xl57MIdRCFDg

A fixed timeout value can fire while a transaction is still ongoing, because
there are no strict requirements on SPI transaction duration. Instead, the
timeout is now calculated dynamically from SCLK and the transaction size.

The calculation also accounts for the 4 * SCLK delay between TX bursts related
to the CS change.
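
For illustration, the resulting formula works out roughly as follows
(hypothetical numbers, not taken from the patch):

	/*
	 * Example: a 100-byte transfer at spi_bus_clk = 1 MHz (1000 kHz).
	 *   data time : DIV_ROUND_UP(8 * 100, 1000)       =    1 ms
	 *   CS delays : DIV_ROUND_UP((100 - 1) * 4, 1000)  =    1 ms
	 *   margin    : + 1000 ms for scheduler latency
	 * The sum is doubled, giving a timeout of roughly 2 seconds.
	 */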

Signed-off-by: Anton Bondarenko <anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
---
 drivers/spi/spi-imx.c | 49 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 41 insertions(+), 8 deletions(-)

diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 9df96c8..f22d9cc 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -57,7 +57,6 @@
 
 /* The maximum  bytes that a sdma BD can transfer.*/
 #define MAX_SDMA_BD_BYTES  (1 << 15)
-#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
 struct spi_imx_config {
 	unsigned int speed_hz;
 	unsigned int bpw;
@@ -93,6 +92,7 @@ struct spi_imx_data {
 	struct clk *clk_per;
 	struct clk *clk_ipg;
 	unsigned long spi_clk;
+	unsigned int spi_bus_clk;
 
 	unsigned int count;
 	void (*tx)(struct spi_imx_data *);
@@ -314,8 +314,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 {
 	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, dma = 0;
 	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
-
-	u32 clk = config->speed_hz, delay;
+	u32 delay;
 
 	/*
 	 * The hardware seems to have a race condition when changing modes. The
@@ -327,7 +326,9 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
 
 	/* set clock speed */
-	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
+	spi_imx->spi_bus_clk = config->speed_hz;
+	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz,
+				  &spi_imx->spi_bus_clk);
 
 	/* set chip select to use */
 	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -360,7 +361,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
 	 * the SPI communication as the device on the other end would consider
 	 * the change of SCLK polarity as a clock tick already.
 	 */
-	delay = (2 * 1000000) / clk;
+	delay = (2 * USEC_PER_SEC) / spi_imx->spi_bus_clk;
 	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
 		udelay(delay);
 	else			/* SCLK is _very_ slow */
@@ -885,12 +886,40 @@ static void spi_imx_dma_tx_callback(void *cookie)
 	complete(&spi_imx->dma_tx_completion);
 }
 
+static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
+{
+	unsigned long coef1 = 1;
+	unsigned long coef2 = MSEC_PER_SEC;
+	unsigned long timeout = 0;
+
+	/* Swap coeficients to avoid div by 0 */
+	if (spi_imx->spi_bus_clk < MSEC_PER_SEC) {
+		coef1 = MSEC_PER_SEC;
+		coef2 = 1;
+	}
+
+	/* Time with actual data transfer */
+	timeout += DIV_ROUND_UP(8 * size * coef1,
+				spi_imx->spi_bus_clk / coef2);
+
+	/* Take CS change delay related to HW */
+	timeout += DIV_ROUND_UP((size - 1) * 4 * coef1,
+				spi_imx->spi_bus_clk / coef2);
+
+	/* Add extra second for scheduler related activities */
+	timeout += MSEC_PER_SEC;
+
+	/* Double calculated timeout */
+	return msecs_to_jiffies(2 * timeout);
+}
+
 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 				struct spi_transfer *transfer)
 {
 	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
 	int ret;
 	unsigned long timeout;
+	unsigned long transfer_timeout;
 	const int left = transfer->len % spi_imx->wml;
 	struct spi_master *master = spi_imx->bitbang.master;
 	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
@@ -943,9 +972,11 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	dma_async_issue_pending(master->dma_tx);
 	spi_imx->devtype_data->trigger(spi_imx);
 
+	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
+
 	/* Wait SDMA to finish the data transfer.*/
 	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
-						IMX_DMA_TIMEOUT);
+						transfer_timeout);
 	if (!timeout) {
 		pr_warn("%s %s: I/O Error in DMA TX\n",
 			dev_driver_string(&master->dev),
@@ -953,8 +984,10 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 		dmaengine_terminate_all(master->dma_tx);
 		dmaengine_terminate_all(master->dma_rx);
 	} else {
+		transfer_timeout = spi_imx_calculate_timeout(spi_imx,
+							     spi_imx->wml * 2);
 		timeout = wait_for_completion_timeout(
-				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+				&spi_imx->dma_rx_completion, transfer_timeout);
 		if (!timeout) {
 			pr_warn("%s %s: I/O Error in DMA RX\n",
 				dev_driver_string(&master->dev),
@@ -974,7 +1007,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 			spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);
 
 			timeout = wait_for_completion_timeout(
-					&spi_imx->xfer_done, IMX_DMA_TIMEOUT);
+					&spi_imx->xfer_done, transfer_timeout);
 			if (!timeout) {
 				pr_warn("%s %s: I/O Error in RX tail\n",
 					dev_driver_string(&master->dev),
-- 
2.3.0


* [PATCH 3/3] spi: imx: add support for all SPI word width for DMA transfer
       [not found] ` <1425219333-3014-1-git-send-email-anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
  2015-03-01 14:15   ` [PATCH 2/3] spi: imx: replace fixed timeout with calculated one Anton Bondarenko
@ 2015-03-01 14:15   ` Anton Bondarenko
  2015-03-09 10:31   ` [PATCH 1/3] spi: imx: Fix " Robin Gong
  2 siblings, 0 replies; 4+ messages in thread
From: Anton Bondarenko @ 2015-03-01 14:15 UTC (permalink / raw)
  To: broonie-DgEjT+Ai2ygdnm+yROfE0A
  Cc: linux-spi-u79uwXL29TY76Z2rM5mHXA, Frank.Li-KZfg59tc24xl57MIdRCFDg,
	b38343-KZfg59tc24xl57MIdRCFDg

Until now, DMA transfers for SPI were limited to word sizes of up to 8 bits.
Keeping the SPI burst size and the DMA bus width in sync is necessary to
correctly support the other bits-per-word (BPW) values supported by the
hardware.
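
In short, the DMA slave bus width is kept equal to the SPI word size, so each
DMA beat carries exactly one SPI word (summary of the spi_imx_sdma_configure()
hunk below):

	/*
	 * bpw_w = DIV_ROUND_UP(bits_per_word, 8)
	 *   bpw_w == 1  ->  DMA_SLAVE_BUSWIDTH_1_BYTE
	 *   bpw_w == 2  ->  DMA_SLAVE_BUSWIDTH_2_BYTES
	 *   bpw_w == 4  ->  DMA_SLAVE_BUSWIDTH_4_BYTES
	 * Any other width is rejected with -EINVAL.
	 */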

Signed-off-by: Anton Bondarenko <anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
---
 drivers/spi/spi-imx.c | 118 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 86 insertions(+), 32 deletions(-)

diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f22d9cc..10d735d 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -89,11 +89,15 @@ struct spi_imx_data {
 
 	struct completion xfer_done;
 	void __iomem *base;
+	unsigned long base_phys;
+
 	struct clk *clk_per;
 	struct clk *clk_ipg;
 	unsigned long spi_clk;
 	unsigned int spi_bus_clk;
 
+	unsigned int bpw_w;
+
 	unsigned int count;
 	void (*tx)(struct spi_imx_data *);
 	void (*rx)(struct spi_imx_data *);
@@ -199,8 +203,14 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 			 struct spi_transfer *transfer)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+	unsigned int bpw_w = transfer->bits_per_word;
+
+	if (!bpw_w)
+		bpw_w = spi->bits_per_word;
+
+	bpw_w = DIV_ROUND_UP(bpw_w, 8);
 
-	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->wml))
+	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->wml * bpw_w))
 		return true;
 	return false;
 }
@@ -757,11 +767,62 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int spi_imx_sdma_configure(struct spi_master *master)
+{
+	int ret;
+	enum dma_slave_buswidth dsb_default = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	struct dma_slave_config slave_config = {};
+	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+	switch (spi_imx->bpw_w) {
+	case 4:
+		dsb_default = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		break;
+	case 2:
+		dsb_default = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		break;
+	case 1:
+		dsb_default = DMA_SLAVE_BUSWIDTH_1_BYTE;
+		break;
+	default:
+		pr_err("Not supported word size %d\n", spi_imx->bpw_w);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	slave_config.direction = DMA_MEM_TO_DEV;
+	slave_config.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
+	slave_config.dst_addr_width = dsb_default;
+	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx)
+					- spi_imx->wml;
+	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+	if (ret) {
+		pr_err("error in TX dma configuration.\n");
+		goto err;
+	}
+
+	memset(&slave_config, 0, sizeof(slave_config));
+
+	slave_config.direction = DMA_DEV_TO_MEM;
+	slave_config.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
+	slave_config.src_addr_width = dsb_default;
+	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx)
+					- spi_imx->wml;
+	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+	if (ret)
+		pr_err("error in RX dma configuration.\n");
+
+err:
+	return ret;
+}
+
 static int spi_imx_setupxfer(struct spi_device *spi,
 				 struct spi_transfer *t)
 {
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	struct spi_imx_config config;
+	unsigned int bpw_w_new;
+	int ret = 0;
 
 	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
 	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
@@ -785,9 +846,18 @@ static int spi_imx_setupxfer(struct spi_device *spi,
 		spi_imx->tx = spi_imx_buf_tx_u32;
 	}
 
-	spi_imx->devtype_data->config(spi_imx, &config);
+	bpw_w_new = DIV_ROUND_UP(config.bpw, 8);
+	if (spi_imx->dma_is_inited && spi_imx->bpw_w != bpw_w_new) {
+		spi_imx->bpw_w = bpw_w_new;
+		ret = spi_imx_sdma_configure(spi->master);
+		if (ret != 0)
+			pr_err("Can't configure SDMA, error %d\n", ret);
+	}
 
-	return 0;
+	if (!ret)
+		ret = spi_imx->devtype_data->config(spi_imx, &config);
+
+	return ret;
 }
 
 static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
@@ -808,10 +878,8 @@ static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
 }
 
 static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
-			     struct spi_master *master,
-			     const struct resource *res)
+			     struct spi_master *master)
 {
-	struct dma_slave_config slave_config = {};
 	int ret;
 
 	/* use pio mode for i.mx6dl chip TKT238285 */
@@ -828,17 +896,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 		goto err;
 	}
 
-	slave_config.direction = DMA_MEM_TO_DEV;
-	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
-	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx)
-					- spi_imx->wml;
-	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
-	if (ret) {
-		dev_err(dev, "error in TX dma configuration.\n");
-		goto err;
-	}
-
 	/* Prepare for RX : */
 	master->dma_rx = dma_request_slave_channel(dev, "rx");
 	if (!master->dma_rx) {
@@ -847,23 +904,19 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 		goto err;
 	}
 
-	slave_config.direction = DMA_DEV_TO_MEM;
-	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
-	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx)
-					- spi_imx->wml;
-	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
-	if (ret) {
-		dev_err(dev, "error in RX dma configuration.\n");
-		goto err;
-	}
-
 	init_completion(&spi_imx->dma_rx_completion);
 	init_completion(&spi_imx->dma_tx_completion);
 	master->can_dma = spi_imx_can_dma;
 	master->max_dma_len = MAX_SDMA_BD_BYTES;
 	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
 					 SPI_MASTER_MUST_TX;
+	spi_imx->bpw_w = 1;
+	ret = spi_imx_sdma_configure(master);
+	if (ret) {
+		dev_info(dev, "cannot get setup DMA.\n");
+		goto err;
+	}
+
 	spi_imx->dma_is_inited = 1;
 
 	return 0;
@@ -920,7 +973,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 	int ret;
 	unsigned long timeout;
 	unsigned long transfer_timeout;
-	const int left = transfer->len % spi_imx->wml;
+	const int left = transfer->len % (spi_imx->wml * spi_imx->bpw_w);
 	struct spi_master *master = spi_imx->bitbang.master;
 	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
 
@@ -1001,7 +1054,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 
 			spi_imx->rx_buf = transfer->rx_buf
 						+ (transfer->len - left);
-			spi_imx->txfifo = left;
+			spi_imx->txfifo = DIV_ROUND_UP(left, spi_imx->bpw_w);
 			reinit_completion(&spi_imx->xfer_done);
 
 			spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);
@@ -1203,6 +1256,7 @@ static int spi_imx_probe(struct platform_device *pdev)
 		ret = PTR_ERR(spi_imx->base);
 		goto out_master_put;
 	}
+	spi_imx->base_phys = res->start;
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
@@ -1242,8 +1296,8 @@ static int spi_imx_probe(struct platform_device *pdev)
 	 * Only validated on i.mx6 now, can remove the constrain if validated on
 	 * other chips.
 	 */
-	if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
-	    && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
+	if (spi_imx->devtype_data == &imx51_ecspi_devtype_data &&
+	    spi_imx_sdma_init(&pdev->dev, spi_imx, master))
 		dev_err(&pdev->dev, "dma setup error,use pio instead\n");
 
 	spi_imx->devtype_data->reset(spi_imx);
-- 
2.3.0


* Re: [PATCH 1/3] spi: imx: Fix DMA transfer
       [not found] ` <1425219333-3014-1-git-send-email-anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
  2015-03-01 14:15   ` [PATCH 2/3] spi: imx: replace fixed timeout with calculated one Anton Bondarenko
  2015-03-01 14:15   ` [PATCH 3/3] spi: imx: add support for all SPI word width for DMA transfer Anton Bondarenko
@ 2015-03-09 10:31   ` Robin Gong
  2 siblings, 0 replies; 4+ messages in thread
From: Robin Gong @ 2015-03-09 10:31 UTC (permalink / raw)
  To: Anton Bondarenko
  Cc: broonie-DgEjT+Ai2ygdnm+yROfE0A, linux-spi-u79uwXL29TY76Z2rM5mHXA,
	Frank.Li-KZfg59tc24xl57MIdRCFDg

Hi Anton,
	Thanks for your patch; please see my comments below. I'm also a little
concerned about performance if we use an interrupt for the last tail data in
the RX FIFO -- how about just reading the data out of the RX FIFO directly?
On Sun, Mar 01, 2015 at 03:15:31PM +0100, Anton Bondarenko wrote:
> RX DMA tail data handling does not work correctly in many cases with the current
> implementation, because the SPI core is set up to generate both the RX watermark
> level and RX DATA TAIL events incorrectly. SPI transfer triggering for DMA is
> also done incorrectly.
> 
> For example, suppose an SPI client wants to transfer 70 words. The old DMA
> implementation set RX DATA TAIL to 6 words, so an RX DMA event is generated
> once 6 words can be read from the RX FIFO. Garbage can then be read out of the
> RX FIFO, because the SPI hardware has not yet received all of the words
> required to trigger the RX watermark event.
> 
> The new implementation changes the handling of the RX data tail. DMA is used to
> process all TX data, but only full chunks of RX data whose size is a multiple
> of FIFO/2. The driver waits until both the TX and RX DMA transactions are done
> and all TX data has been pushed out. At that moment only the RX data tail
> remains in the RX FIFO, and it is read out using PIO.
> Transfer triggering is also changed to avoid RX data loss.
> 
> Signed-off-by: Anton Bondarenko <anton_bondarenko-nmGgyN9QBj3QT0dZR+AlfA@public.gmane.org>
> ---
>  drivers/spi/spi-imx.c | 100 +++++++++++++++++++++++++++++++-------------------
>  1 file changed, 62 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
> index 6fea4af..9df96c8 100644
> --- a/drivers/spi/spi-imx.c
> +++ b/drivers/spi/spi-imx.c
> @@ -53,6 +53,7 @@
>  /* generic defines to abstract from the different register layouts */
>  #define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
>  #define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
> +#define MXC_INT_TCEN	BIT(7)   /* Transfer complete */
>  
>  /* The maximum  bytes that a sdma BD can transfer.*/
>  #define MAX_SDMA_BD_BYTES  (1 << 15)
> @@ -104,9 +105,7 @@ struct spi_imx_data {
>  	unsigned int dma_is_inited;
>  	unsigned int dma_finished;
>  	bool usedma;
> -	u32 rx_wml;
> -	u32 tx_wml;
> -	u32 rxt_wml;
> +	u32 wml;
>  	struct completion dma_rx_completion;
>  	struct completion dma_tx_completion;
>  
> @@ -201,8 +200,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
>  {
>  	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
>  
> -	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
> -	    && (transfer->len > spi_imx->tx_wml))
> +	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->wml))
>  		return true;
>  	return false;
>  }
> @@ -227,6 +225,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
>  #define MX51_ECSPI_INT		0x10
>  #define MX51_ECSPI_INT_TEEN		(1 <<  0)
>  #define MX51_ECSPI_INT_RREN		(1 <<  3)
> +#define MX51_ECSPI_INT_TCEN		BIT(7)
>  
>  #define MX51_ECSPI_DMA      0x14
>  #define MX51_ECSPI_DMA_TX_WML_OFFSET	0
> @@ -291,6 +290,9 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
>  	if (enable & MXC_INT_RR)
>  		val |= MX51_ECSPI_INT_RREN;
>  
> +	if (enable & MXC_INT_TCEN)
> +		val |= MX51_ECSPI_INT_TCEN;
> +
>  	writel(val, spi_imx->base + MX51_ECSPI_INT);
>  }
>  
> @@ -310,8 +312,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
>  static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
>  		struct spi_imx_config *config)
>  {
> -	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
> -	u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
> +	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, dma = 0;
> +	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
> +
>  	u32 clk = config->speed_hz, delay;
>  
>  	/*
> @@ -368,21 +371,9 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
>  	 * and enable DMA request.
>  	 */
>  	if (spi_imx->dma_is_inited) {
> -		dma = readl(spi_imx->base + MX51_ECSPI_DMA);
> -
> -		spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
> -		spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
> -		spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
> -		rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
> -		tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
> -		rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
> -		dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
> -			   & ~MX51_ECSPI_DMA_RX_WML_MASK
> -			   & ~MX51_ECSPI_DMA_RXT_WML_MASK)
> -			   | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
> -			   |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
> -			   |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
> -			   |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
> +		dma =   (spi_imx->wml - 1) << MX51_ECSPI_DMA_RX_WML_OFFSET
> +			| (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
> +			| (1 << MX51_ECSPI_DMA_RXDEN_OFFSET);
> 
Why is tx_wml set to 0? Performance will be impacted significantly. Or is there a known issue here?
>  		writel(dma, spi_imx->base + MX51_ECSPI_DMA);
>  	}
> @@ -826,6 +817,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
>  	if (of_machine_is_compatible("fsl,imx6dl"))
>  		return 0;
>  
> +	spi_imx->wml = spi_imx_get_fifosize(spi_imx) / 2;
> +
>  	/* Prepare for TX DMA: */
>  	master->dma_tx = dma_request_slave_channel(dev, "tx");
>  	if (!master->dma_tx) {
> @@ -837,7 +830,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
>  	slave_config.direction = DMA_MEM_TO_DEV;
>  	slave_config.dst_addr = res->start + MXC_CSPITXDATA;
>  	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
> -	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
> +	slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx)
> +					- spi_imx->wml;
>  	ret = dmaengine_slave_config(master->dma_tx, &slave_config);
>  	if (ret) {
>  		dev_err(dev, "error in TX dma configuration.\n");
> @@ -855,7 +849,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
>  	slave_config.direction = DMA_DEV_TO_MEM;
>  	slave_config.src_addr = res->start + MXC_CSPIRXDATA;
>  	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
> -	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
> +	slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx)
> +					- spi_imx->wml;
>  	ret = dmaengine_slave_config(master->dma_rx, &slave_config);
>  	if (ret) {
>  		dev_err(dev, "error in RX dma configuration.\n");
> @@ -896,8 +891,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
>  	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
>  	int ret;
>  	unsigned long timeout;
> -	u32 dma;
> -	int left;
> +	const int left = transfer->len % spi_imx->wml;
>  	struct spi_master *master = spi_imx->bitbang.master;
>  	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
>  
> @@ -914,9 +908,23 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
>  	}
>  
>  	if (rx) {
> +		/* Cut RX data tail */
> +		const unsigned int old_nents = rx->nents;
> +
> +		WARN_ON(sg_dma_len(&rx->sgl[rx->nents - 1]) < left);
> +		sg_dma_len(&rx->sgl[rx->nents - 1]) -= left;
> +		if (sg_dma_len(&rx->sgl[rx->nents - 1]) == 0)
> +			--rx->nents;
> +
>  		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
>  					rx->sgl, rx->nents, DMA_FROM_DEVICE,
>  					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> +
> +		/* Restore old SG table state */
> +		if (old_nents > rx->nents)
> +			++rx->nents;
> +		sg_dma_len(&rx->sgl[rx->nents - 1]) += left;
> +
>  		if (!desc_rx)
>  			goto no_dma;
>  
> @@ -931,17 +939,10 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
>  	/* Trigger the cspi module. */
>  	spi_imx->dma_finished = 0;
>  
> -	dma = readl(spi_imx->base + MX51_ECSPI_DMA);
> -	dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
> -	/* Change RX_DMA_LENGTH trigger dma fetch tail data */
> -	left = transfer->len % spi_imx->rxt_wml;
> -	if (left)
> -		writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
> -				spi_imx->base + MX51_ECSPI_DMA);
> +	dma_async_issue_pending(master->dma_rx);
> +	dma_async_issue_pending(master->dma_tx);
It's better to keep the same sequence as before: issue_pending after trigger.
>  	spi_imx->devtype_data->trigger(spi_imx);
>  
> -	dma_async_issue_pending(master->dma_tx);
> -	dma_async_issue_pending(master->dma_rx);
>  	/* Wait SDMA to finish the data transfer.*/
>  	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
>  						IMX_DMA_TIMEOUT);
> @@ -950,6 +951,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
>  			dev_driver_string(&master->dev),
>  			dev_name(&master->dev));
>  		dmaengine_terminate_all(master->dma_tx);
> +		dmaengine_terminate_all(master->dma_rx);
>  	} else {
>  		timeout = wait_for_completion_timeout(
>  				&spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
> @@ -959,10 +961,28 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
>  				dev_name(&master->dev));
>  			spi_imx->devtype_data->reset(spi_imx);
>  			dmaengine_terminate_all(master->dma_rx);
> +		} else if (left) {
> +			dma_sync_sg_for_cpu(master->dma_rx->device->dev,
> +					    rx->sgl, rx->nents,
> +					    DMA_FROM_DEVICE);
How about only for the last entry?

> +
> +			spi_imx->rx_buf = transfer->rx_buf
> +						+ (transfer->len - left);
> +			spi_imx->txfifo = left;
> +			reinit_completion(&spi_imx->xfer_done);
> +
> +			spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);
> +
> +			timeout = wait_for_completion_timeout(
> +					&spi_imx->xfer_done, IMX_DMA_TIMEOUT);
> +			if (!timeout) {
> +				pr_warn("%s %s: I/O Error in RX tail\n",
> +					dev_driver_string(&master->dev),
> +					dev_name(&master->dev));
> +			}
>  		}
> -		writel(dma |
> -		       spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
> -		       spi_imx->base + MX51_ECSPI_DMA);
> +
> +		writel(0, spi_imx->base + MX51_ECSPI_DMA);
That may cause the next DMA transfer to fail if setup_transfer is not called,
because it clears the DMA settings made in mx51_ecspi_config. Please remove:
	writel(0, spi_imx->base + MX51_ECSPI_DMA);
>  	}
>  
>  	spi_imx->dma_finished = 1;
> @@ -1009,6 +1029,10 @@ static int spi_imx_transfer(struct spi_device *spi,
>  	int ret;
>  	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
>  
> +	/* flush rxfifo before transfer */
> +	while (spi_imx->devtype_data->rx_available(spi_imx))
> +		spi_imx->rx(spi_imx);
> +
Why flush rxfifo here?
>  	if (spi_imx->bitbang.master->can_dma &&
>  	    spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
>  		spi_imx->usedma = true;
> -- 
> 2.3.0
> 
