dmaengine.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [v4] dmaengine: pl330: flush before wait, and add dev burst support.
@ 2018-03-13 18:34 Frank Mori Hess
  0 siblings, 0 replies; 5+ messages in thread
From: Frank Mori Hess @ 2018-03-13 18:34 UTC (permalink / raw)
  To: dmaengine; +Cc: Vinod Koul, Dan Williams, linux-kernel

Do DMAFLUSHP _before_ the first DMAWFP to ensure controller
and peripheral are in agreement about dma request state before first
transfer.  Add support for burst transfers to/from peripherals. In the new
scheme, the controller does as many burst transfers as it can then
transfers the remaining dregs with either single transfers for
peripherals, or with a reduced size burst for memory-to-memory transfers.

Signed-off-by: Frank Mori Hess <fmh6jj@gmail.com>
Tested-by: Frank Mori Hess <fmh6jj@gmail.com>
---

I tested dma transfers to peripherals with v3 patch and a DesignWare serial 
port (drivers/tty/serial/8250/8250_dw.c) and a GPIB interface
(https://github.com/fmhess/fmh_gpib_core).  I tested memory-to-memory
transfers using the dmatest module.

v3 of this patch should be the same as v2 except with checkpatch.pl
warnings and errors cleaned up.

v4 addresses cosmetic complaints about v3, should be functionally unchanged.

 drivers/dma/pl330.c | 209 +++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 159 insertions(+), 50 deletions(-)

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index d7327fd5f445..819a578e317f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -27,6 +27,7 @@
 #include <linux/of_dma.h>
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
+#include <linux/bug.h>
 
 #include "dmaengine.h"
 #define PL330_MAX_CHAN		8
@@ -1094,51 +1095,96 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
 	return off;
 }
 
-static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
-				 u8 buf[], const struct _xfer_spec *pxs,
-				 int cyc)
+static u32 _emit_load(unsigned int dry_run, u8 buf[],
+	enum pl330_cond cond, enum dma_transfer_direction direction,
+	u8 peri)
 {
 	int off = 0;
-	enum pl330_cond cond;
 
-	if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
-		cond = BURST;
-	else
-		cond = SINGLE;
+	switch (direction) {
+	case DMA_MEM_TO_MEM:
+		/* fall through */
+	case DMA_MEM_TO_DEV:
+		off += _emit_LD(dry_run, &buf[off], cond);
+		break;
 
-	while (cyc--) {
-		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
-		off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
-		off += _emit_ST(dry_run, &buf[off], ALWAYS);
+	case DMA_DEV_TO_MEM:
+		if (cond == ALWAYS) {
+			off += _emit_LDP(dry_run, &buf[off], SINGLE,
+				peri);
+			off += _emit_LDP(dry_run, &buf[off], BURST,
+				peri);
+		} else {
+			off += _emit_LDP(dry_run, &buf[off], cond,
+				peri);
+		}
+		break;
 
-		if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
-			off += _emit_FLUSHP(dry_run, &buf[off],
-					    pxs->desc->peri);
+	default:
+		/* this code should be unreachable */
+		WARN_ON(1);
+		break;
 	}
 
 	return off;
 }
 
-static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
+	enum pl330_cond cond, enum dma_transfer_direction direction,
+	u8 peri)
+{
+	int off = 0;
+
+	switch (direction) {
+	case DMA_MEM_TO_MEM:
+		/* fall through */
+	case DMA_DEV_TO_MEM:
+		off += _emit_ST(dry_run, &buf[off], cond);
+		break;
+
+	case DMA_MEM_TO_DEV:
+		if (cond == ALWAYS) {
+			off += _emit_STP(dry_run, &buf[off], SINGLE,
+				peri);
+			off += _emit_STP(dry_run, &buf[off], BURST,
+				peri);
+		} else {
+			off += _emit_STP(dry_run, &buf[off], cond,
+				peri);
+		}
+		break;
+
+	default:
+		/* this code should be unreachable */
+		WARN_ON(1);
+		break;
+	}
+
+	return off;
+}
+
+static inline int _ldst_peripheral(struct pl330_dmac *pl330,
 				 unsigned dry_run, u8 buf[],
-				 const struct _xfer_spec *pxs, int cyc)
+				 const struct _xfer_spec *pxs, int cyc,
+				 enum pl330_cond cond)
 {
 	int off = 0;
-	enum pl330_cond cond;
 
 	if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
 		cond = BURST;
-	else
-		cond = SINGLE;
 
+	/*
+	 * do FLUSHP at beginning to clear any stale dma requests before the
+	 * first WFP.
+	 */
+	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
 	while (cyc--) {
 		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
-		off += _emit_LD(dry_run, &buf[off], ALWAYS);
-		off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
-
-		if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
-			off += _emit_FLUSHP(dry_run, &buf[off],
-					    pxs->desc->peri);
+		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
+			pxs->desc->peri);
+		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
+			pxs->desc->peri);
 	}
 
 	return off;
@@ -1148,19 +1194,65 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
 		const struct _xfer_spec *pxs, int cyc)
 {
 	int off = 0;
+	enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;
 
 	switch (pxs->desc->rqtype) {
 	case DMA_MEM_TO_DEV:
-		off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
-		break;
+		/* fall through */
 	case DMA_DEV_TO_MEM:
-		off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
+		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
+			cond);
 		break;
+
 	case DMA_MEM_TO_MEM:
 		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
 		break;
+
+	default:
+		/* this code should be unreachable */
+		WARN_ON(1);
+		break;
+	}
+
+	return off;
+}
+
+/*
+ * transfer dregs with single transfers to peripheral, or a reduced size burst
+ * for mem-to-mem.
+ */
+static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
+		const struct _xfer_spec *pxs, int transfer_length)
+{
+	int off = 0;
+	int dregs_ccr;
+
+	if (transfer_length == 0)
+		return off;
+
+	switch (pxs->desc->rqtype) {
+	case DMA_MEM_TO_DEV:
+		/* fall through */
+	case DMA_DEV_TO_MEM:
+		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs,
+			transfer_length, SINGLE);
+		break;
+
+	case DMA_MEM_TO_MEM:
+		dregs_ccr = pxs->ccr;
+		dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
+			(0xf << CC_DSTBRSTLEN_SHFT));
+		dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+			CC_SRCBRSTLEN_SHFT);
+		dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+			CC_DSTBRSTLEN_SHFT);
+		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
+		off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
+		break;
+
 	default:
-		off += 0x40000000; /* Scare off the Client */
+		/* this code should be unreachable */
+		WARN_ON(1);
 		break;
 	}
 
@@ -1256,6 +1348,8 @@ static inline int _setup_loops(struct pl330_dmac *pl330,
 	struct pl330_xfer *x = &pxs->desc->px;
 	u32 ccr = pxs->ccr;
 	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
+	int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
+		BRST_SIZE(ccr);
 	int off = 0;
 
 	while (bursts) {
@@ -1263,6 +1357,7 @@ static inline int _setup_loops(struct pl330_dmac *pl330,
 		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
 		bursts -= c;
 	}
+	off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);
 
 	return off;
 }
@@ -1294,7 +1389,6 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
 		      struct _xfer_spec *pxs)
 {
 	struct _pl330_req *req = &thrd->req[index];
-	struct pl330_xfer *x;
 	u8 *buf = req->mc_cpu;
 	int off = 0;
 
@@ -1303,11 +1397,6 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
 	/* DMAMOV CCR, ccr */
 	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
 
-	x = &pxs->desc->px;
-	/* Error if xfer length is not aligned at burst size */
-	if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
-		return -EINVAL;
-
 	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
 
 	/* DMASEV peripheral/event */
@@ -1365,6 +1454,20 @@ static int pl330_submit_req(struct pl330_thread *thrd,
 	u32 ccr;
 	int ret = 0;
 
+	switch (desc->rqtype) {
+	case DMA_MEM_TO_DEV:
+		break;
+
+	case DMA_DEV_TO_MEM:
+		break;
+
+	case DMA_MEM_TO_MEM:
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
 	if (pl330->state == DYING
 		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
 		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
@@ -2104,6 +2207,18 @@ static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
 	return true;
 }
 
+static int fixup_burst_len(int max_burst_len, int quirks)
+{
+	if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+		return 1;
+	else if (max_burst_len > PL330_MAX_BURST)
+		return PL330_MAX_BURST;
+	else if (max_burst_len < 1)
+		return 1;
+	else
+		return max_burst_len;
+}
+
 static int pl330_config(struct dma_chan *chan,
 			struct dma_slave_config *slave_config)
 {
@@ -2115,15 +2230,15 @@ static int pl330_config(struct dma_chan *chan,
 			pch->fifo_addr = slave_config->dst_addr;
 		if (slave_config->dst_addr_width)
 			pch->burst_sz = __ffs(slave_config->dst_addr_width);
-		if (slave_config->dst_maxburst)
-			pch->burst_len = slave_config->dst_maxburst;
+		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
+			pch->dmac->quirks);
 	} else if (slave_config->direction == DMA_DEV_TO_MEM) {
 		if (slave_config->src_addr)
 			pch->fifo_addr = slave_config->src_addr;
 		if (slave_config->src_addr_width)
 			pch->burst_sz = __ffs(slave_config->src_addr_width);
-		if (slave_config->src_maxburst)
-			pch->burst_len = slave_config->src_maxburst;
+		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
+			pch->dmac->quirks);
 	}
 
 	return 0;
@@ -2517,14 +2632,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 	burst_len >>= desc->rqcfg.brst_size;
 
 	/* src/dst_burst_len can't be more than 16 */
-	if (burst_len > 16)
-		burst_len = 16;
-
-	while (burst_len > 1) {
-		if (!(len % (burst_len << desc->rqcfg.brst_size)))
-			break;
-		burst_len--;
-	}
+	if (burst_len > PL330_MAX_BURST)
+		burst_len = PL330_MAX_BURST;
 
 	return burst_len;
 }
@@ -2596,7 +2705,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
 		desc->rqtype = direction;
 		desc->rqcfg.brst_size = pch->burst_sz;
-		desc->rqcfg.brst_len = 1;
+		desc->rqcfg.brst_len = pch->burst_len;
 		desc->bytes_requested = period_len;
 		fill_px(&desc->px, dst, src, period_len);
 
@@ -2741,7 +2850,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		}
 
 		desc->rqcfg.brst_size = pch->burst_sz;
-		desc->rqcfg.brst_len = 1;
+		desc->rqcfg.brst_len = pch->burst_len;
 		desc->rqtype = direction;
 		desc->bytes_requested = sg_dma_len(sg);
 	}

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [v4] dmaengine: pl330: flush before wait, and add dev burst support.
@ 2018-04-10  0:41 Frank Mori Hess
  0 siblings, 0 replies; 5+ messages in thread
From: Frank Mori Hess @ 2018-04-10  0:41 UTC (permalink / raw)
  To: dmaengine; +Cc: Vinod Koul, Dan Williams, linux-kernel

On Tue, Mar 13, 2018 at 2:34 PM, Frank Mori Hess <fmh6jj@gmail.com> wrote:
> Do DMAFLUSHP _before_ the first DMAWFP to ensure controller
> and peripheral are in agreement about dma request state before first
> transfer.  Add support for burst transfers to/from peripherals. In the new
> scheme, the controller does as many burst transfers as it can then
> transfers the remaining dregs with either single transfers for
> peripherals, or with a reduced size burst for memory-to-memory transfers.

Hi, what is the state of this patch?  I just noticed in patchwork it
is now listed as "Not applicable"?  The original broken-by-wordwrap
patch is listed as "Accepted"?

https://patchwork.kernel.org/project/linux-dmaengine/list/?submitter=178687&state=*

I found it has a bug handling dregs btw, I'll include a fix for that
in the next version or as a follow-on patch as appropriate.
---
To unsubscribe from this list: send the line "unsubscribe dmaengine" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [v4] dmaengine: pl330: flush before wait, and add dev burst support.
@ 2018-04-10 15:37 Vinod Koul
  0 siblings, 0 replies; 5+ messages in thread
From: Vinod Koul @ 2018-04-10 15:37 UTC (permalink / raw)
  To: Frank Mori Hess; +Cc: dmaengine, Dan Williams, linux-kernel

On Mon, Apr 09, 2018 at 08:41:18PM -0400, Frank Mori Hess wrote:
> On Tue, Mar 13, 2018 at 2:34 PM, Frank Mori Hess <fmh6jj@gmail.com> wrote:
> > Do DMAFLUSHP _before_ the first DMAWFP to ensure controller
> > and peripheral are in agreement about dma request state before first
> > transfer.  Add support for burst transfers to/from peripherals. In the new
> > scheme, the controller does as many burst transfers as it can then
> > transfers the remaining dregs with either single transfers for
> > peripherals, or with a reduced size burst for memory-to-memory transfers.
> 
> Hi, what is the state of this patch?  I just noticed in patchwork it
> is now listed as "Not applicable"?  The original broken-by-wordwrap
> patch is listed as "Accepted"?
> 
> https://patchwork.kernel.org/project/linux-dmaengine/list/?submitter=178687&state=*

That is not correct state, my script would update as not applicable when it
doesn't find patch in my queue..

> I found it has a bug handling dregs btw, I'll include a fix for that
> in the next version or as a follow-on patch as appropriate.

Looks like I have missed this one somehow, so please update the patch with
fix and I shall look into it.

Thanks

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [v4] dmaengine: pl330: flush before wait, and add dev burst support.
@ 2018-04-15 18:12 Frank Mori Hess
  0 siblings, 0 replies; 5+ messages in thread
From: Frank Mori Hess @ 2018-04-15 18:12 UTC (permalink / raw)
  To: Vinod Koul; +Cc: dmaengine, Dan Williams, linux-kernel

On Tue, Apr 10, 2018 at 11:37 AM, Vinod Koul <vinod.koul@intel.com> wrote:
>>
>> Hi, what is the state of this patch?  I just noticed in patchwork it
>> is now listed as "Not applicable"?  The original broken-by-wordwrap
>> patch is listed as "Accepted"?
>>
>> https://patchwork.kernel.org/project/linux-dmaengine/list/?submitter=178687&state=*
>
> That is not correct state, my script would update as not applicable when it
> doesn't find patch in my queue..
>
>> I found it has a bug handling dregs btw, I'll include a fix for that
>> in the next version or as a follow-on patch as appropriate.
>
> Looks like I have missed this one somehow, so please update the patch with
> fix and I shall look into it.
>

Ok, I tested the v4 patch and it turns out it was a false alarm about
the bug in handling dregs.  It works fine.  I've registered in the
dmaengine patchwork, and changed the state of the v4 patch back to
"new".

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [v4] dmaengine: pl330: flush before wait, and add dev burst support.
@ 2018-04-16 15:33 Vinod Koul
  0 siblings, 0 replies; 5+ messages in thread
From: Vinod Koul @ 2018-04-16 15:33 UTC (permalink / raw)
  To: Frank Mori Hess; +Cc: dmaengine, Dan Williams, linux-kernel

On Sun, Apr 15, 2018 at 02:12:30PM -0400, Frank Mori Hess wrote:
> On Tue, Apr 10, 2018 at 11:37 AM, Vinod Koul <vinod.koul@intel.com> wrote:
> >>
> >> Hi, what is the state of this patch?  I just noticed in patchwork it
> >> is now listed as "Not applicable"?  The original broken-by-wordwrap
> >> patch is listed as "Accepted"?
> >>
> >> https://patchwork.kernel.org/project/linux-dmaengine/list/?submitter=178687&state=*
> >
> > That is not correct state, my script would update as not applicable when it
> > doesn't find patch in my queue..
> >
> >> I found it has a bug handling dregs btw, I'll include a fix for that
> >> in the next version or as a follow-on patch as appropriate.
> >
> > Looks like I have missed this one somehow, so please update the patch with
> > fix and I shall look into it.
> >
> 
> Ok, I tested the v4 patch and it turns out it was a false alarm about
> the bug in handling dregs.  It works fine.  I've registered in the
> dmaengine patchwork, and changed the state of the v4 patch back to
> "new".

Can you please repost this one after rebasing to dmaengine -next

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2018-04-16 15:33 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-16 15:33 [v4] dmaengine: pl330: flush before wait, and add dev burst support Vinod Koul
  -- strict thread matches above, loose matches on Subject: below --
2018-04-15 18:12 Frank Mori Hess
2018-04-10 15:37 Vinod Koul
2018-04-10  0:41 Frank Mori Hess
2018-03-13 18:34 Frank Mori Hess

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).