linux-mtd.lists.infradead.org archive mirror
* [PATCH v3 1/2] mtd: nand: sunxi: add support for DMA assisted operations
@ 2016-04-15 13:10 Boris Brezillon
  2016-04-15 13:10 ` [PATCH v3 2/2] mtd: nand: sunxi: update DT bindings Boris Brezillon
  2016-06-06 16:28 ` [PATCH v3 1/2] mtd: nand: sunxi: add support for DMA assisted operations Boris Brezillon
  0 siblings, 2 replies; 4+ messages in thread
From: Boris Brezillon @ 2016-04-15 13:10 UTC (permalink / raw)
  To: Boris Brezillon, Richard Weinberger, linux-mtd
  Cc: David Woodhouse, Brian Norris, Maxime Ripard, Chen-Yu Tsai,
	linux-sunxi

The sunxi NAND controller can only pipeline ECC operations when operated
in DMA mode, which greatly improves NAND throughput while keeping CPU
usage low.

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
---
Changes since v2:
- completely drop the generic approach based on Russell's feedback
  (doing DMA on non-lowmem memory is unsafe; see the note after this
  changelog)
- fix a bug in sunxi_nfc_hw_ecc_read_chunks_dma() where the ECC status
  value was lost when at least one bitflip was found in an erased chunk

Changes since v1:
- reworked sg_alloc_table_from_buf() to avoid splitting contiguous
  vmalloc'ed areas
- fixed a bug in the read_dma() path
- fixed the dma_direction flag in write_dma()
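
To illustrate the point behind the v2 change (a hypothetical sketch, not
code from this patch): only buffers in the kernel linear mapping can be
handed to the DMA mapping API, so a generic helper would have needed a
guard along these lines, with the caller falling back to PIO otherwise:

	/*
	 * Sketch only: vmalloc'ed or highmem buffers cannot be passed to
	 * dma_map_sg() directly, so reject them here.
	 */
	if (is_vmalloc_addr(buf) || !virt_addr_valid(buf))
		return -EINVAL;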

 drivers/mtd/nand/sunxi_nand.c | 330 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 323 insertions(+), 7 deletions(-)
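
For readers less familiar with the dmaengine slave API, the DMA path added
below follows the usual map/prepare/submit/issue sequence. A condensed,
hypothetical sketch (dev, chan, buf and len are placeholders; the real
code lives in sunxi_nfc_dma_op_prepare()/sunxi_nfc_dma_op_cleanup()):

	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;

	/* Wrap the lowmem buffer in a one-entry scatterlist and map it. */
	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	/* Prepare and queue a slave transfer on the "rxtx" channel. */
	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       DMA_CTRL_ACK);
	if (!desc) {
		/* The dmaengine prep helpers return NULL on failure. */
		dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);
		return -ENOMEM;
	}
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* ... program the NFC command and wait for NFC_CMD_INT_FLAG ... */

	dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);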

diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 1baf8983..5299ab0 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -153,6 +153,7 @@
 
 /* define bit use in NFC_ECC_ST */
 #define NFC_ECC_ERR(x)		BIT(x)
+#define NFC_ECC_ERR_MSK		GENMASK(15, 0)
 #define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
 #define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)
 
@@ -273,6 +274,7 @@ struct sunxi_nfc {
 	unsigned long clk_rate;
 	struct list_head chips;
 	struct completion complete;
+	struct dma_chan *dmac;
 };
 
 static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
@@ -365,6 +367,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
 	return ret;
 }
 
+static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+				    int chunksize, int nchunks,
+				    enum dma_data_direction ddir,
+				    struct scatterlist *sg)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct dma_async_tx_descriptor *dmad;
+	enum dma_transfer_direction tdir;
+	dma_cookie_t dmat;
+	int ret;
+
+	if (ddir == DMA_FROM_DEVICE)
+		tdir = DMA_DEV_TO_MEM;
+	else
+		tdir = DMA_MEM_TO_DEV;
+
+	sg_init_one(sg, buf, nchunks * chunksize);
+	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+	if (!ret)
+		return -ENOMEM;
+
+	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+	if (!dmad) {
+		ret = -ENOMEM;
+		goto err_unmap_buf;
+	}
+
+	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+	       nfc->regs + NFC_REG_CTL);
+	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+	writel(chunksize, nfc->regs + NFC_REG_CNT);
+	dmat = dmaengine_submit(dmad);
+
+	ret = dma_submit_error(dmat);
+	if (ret)
+		goto err_clr_dma_flag;
+
+	return 0;
+
+err_clr_dma_flag:
+	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+	       nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+	dma_unmap_sg(nfc->dev, sg, 1, ddir);
+	return ret;
+}
+
+static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+				     enum dma_data_direction ddir,
+				     struct scatterlist *sg)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	dma_unmap_sg(nfc->dev, sg, 1, ddir);
+	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+	       nfc->regs + NFC_REG_CTL);
+}
+
 static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
@@ -822,17 +885,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
 }
 
 static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
-				    int step, bool *erased)
+				    int step, u32 status, bool *erased)
 {
 	struct nand_chip *nand = mtd_to_nand(mtd);
 	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
 	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	u32 status, tmp;
+	u32 tmp;
 
 	*erased = false;
 
-	status = readl(nfc->regs + NFC_REG_ECC_ST);
-
 	if (status & NFC_ECC_ERR(step))
 		return -EBADMSG;
 
@@ -898,6 +959,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
 	*cur_off = oob_off + ecc->bytes + 4;
 
 	ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+				       readl(nfc->regs + NFC_REG_ECC_ST),
 				       &erased);
 	if (erased)
 		return 1;
@@ -967,6 +1029,128 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
 		*cur_off = mtd->oobsize + mtd->writesize;
 }
 
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+					    int oob_required, int page,
+					    int nchunks)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	unsigned int max_bitflips = 0;
+	int ret, i, raw_mode = 0;
+	struct scatterlist sg;
+	u32 status;
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return ret;
+
+	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+				       DMA_FROM_DEVICE, &sg);
+	if (ret)
+		return ret;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+	sunxi_nfc_randomizer_config(mtd, page, false);
+	sunxi_nfc_randomizer_enable(mtd);
+
+	writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+	       NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+	dma_async_issue_pending(nfc->dmac);
+
+	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+	       nfc->regs + NFC_REG_CMD);
+
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	if (ret)
+		dmaengine_terminate_all(nfc->dmac);
+
+	sunxi_nfc_randomizer_disable(mtd);
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+
+	if (ret)
+		return ret;
+
+	status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+	for (i = 0; i < nchunks; i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		u8 *data = buf + data_off;
+		u8 *oob = nand->oob_poi + oob_off;
+		bool erased;
+
+		ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+					       oob_required ? oob : NULL,
+					       i, status, &erased);
+
+		/* ECC errors are handled in the second loop. */
+		if (ret < 0)
+			continue;
+
+		if (oob_required && !erased) {
+			/* TODO: use DMA to retrieve OOB */
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+			nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+							    !i, page);
+		}
+
+		if (erased)
+			raw_mode = 1;
+
+		sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+	}
+
+	if (status & NFC_ECC_ERR_MSK) {
+		for (i = 0; i < nchunks; i++) {
+			int data_off = i * ecc->size;
+			int oob_off = i * (ecc->bytes + 4);
+			u8 *data = buf + data_off;
+			u8 *oob = nand->oob_poi + oob_off;
+
+			if (!(status & NFC_ECC_ERR(i)))
+				continue;
+
+			/*
+			 * Re-read the data with the randomizer disabled to
+			 * identify bitflips in erased pages.
+			 */
+			if (randomized) {
+				/* TODO: use DMA to read page in raw mode */
+				nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+					      data_off, -1);
+				nand->read_buf(mtd, data, ecc->size);
+			}
+
+			/* TODO: use DMA to retrieve OOB */
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+			nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+			ret = nand_check_erased_ecc_chunk(data, ecc->size,
+							  oob, ecc->bytes + 4,
+							  NULL, 0,
+							  ecc->strength);
+			if (ret >= 0)
+				raw_mode = 1;
+
+			sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+		}
+	}
+
+	if (oob_required)
+		sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+						NULL, !raw_mode,
+						page);
+
+	return max_bitflips;
+}
+
 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
 					const u8 *data, int data_off,
 					const u8 *oob, int oob_off,
@@ -1065,6 +1249,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
 	return max_bitflips;
 }
 
+static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
+					  struct nand_chip *chip, u8 *buf,
+					  int oob_required, int page)
+{
+	int ret;
+
+	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
+					       chip->ecc.steps);
+	if (ret >= 0)
+		return ret;
+
+	/* Fallback to PIO mode */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+	return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
+}
+
 static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
 					 struct nand_chip *chip,
 					 u32 data_offs, u32 readlen,
@@ -1098,6 +1299,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
 	return max_bitflips;
 }
 
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
+					     struct nand_chip *chip,
+					     u32 data_offs, u32 readlen,
+					     u8 *buf, int page)
+{
+	int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+	int ret;
+
+	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+	if (ret >= 0)
+		return ret;
+
+	/* Fallback to PIO mode */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+	return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
+					     buf, page);
+}
+
 static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
 				       struct nand_chip *chip,
 				       const uint8_t *buf, int oob_required,
@@ -1130,6 +1350,69 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
 	return 0;
 }
 
+static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
+					   struct nand_chip *chip,
+					   const u8 *buf,
+					   int oob_required,
+					   int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	struct scatterlist sg;
+	int ret, i;
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return ret;
+
+	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+				       DMA_TO_DEVICE, &sg);
+	if (ret)
+		goto pio_fallback;
+
+	for (i = 0; i < ecc->steps; i++) {
+		const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+		sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+	}
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+	sunxi_nfc_randomizer_config(mtd, page, false);
+	sunxi_nfc_randomizer_enable(mtd);
+
+	writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+	       nfc->regs + NFC_REG_RCMD_SET);
+
+	dma_async_issue_pending(nfc->dmac);
+
+	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+	       NFC_DATA_TRANS | NFC_ACCESS_DIR,
+	       nfc->regs + NFC_REG_CMD);
+
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	if (ret)
+		dmaengine_terminate_all(nfc->dmac);
+
+	sunxi_nfc_randomizer_disable(mtd);
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+
+	if (ret)
+		return ret;
+
+	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+		/* TODO: use DMA to transfer extra OOB bytes ? */
+		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+						 NULL, page);
+
+	return 0;
+
+pio_fallback:
+	return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
+}
+
 static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
 					       struct nand_chip *chip,
 					       uint8_t *buf, int oob_required,
@@ -1550,14 +1833,27 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
 				       struct nand_ecc_ctrl *ecc,
 				       struct device_node *np)
 {
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
 	int ret;
 
 	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
 	if (ret)
 		return ret;
 
-	ecc->read_page = sunxi_nfc_hw_ecc_read_page;
-	ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+	if (nfc->dmac) {
+		ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+		ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+		nand->options |= NAND_USE_BOUNCE_BUFFER;
+	} else {
+		ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+		ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+	}
+
+	/* TODO: support DMA for raw accesses */
 	ecc->read_oob_raw = nand_read_oob_std;
 	ecc->write_oob_raw = nand_write_oob_std;
 	ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1883,16 +2179,34 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_mod_clk_unprepare;
 
+	nfc->dmac = dma_request_slave_channel(dev, "rxtx");
+	if (nfc->dmac) {
+		struct dma_slave_config dmac_cfg = { };
+
+		dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+		dmac_cfg.dst_addr = dmac_cfg.src_addr;
+		dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+		dmac_cfg.src_maxburst = 4;
+		dmac_cfg.dst_maxburst = 4;
+		dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+	} else {
+		dev_warn(dev, "failed to request rxtx DMA channel\n");
+	}
+
 	platform_set_drvdata(pdev, nfc);
 
 	ret = sunxi_nand_chips_init(dev, nfc);
 	if (ret) {
 		dev_err(dev, "failed to init nand chips\n");
-		goto out_mod_clk_unprepare;
+		goto out_release_dmac;
 	}
 
 	return 0;
 
+out_release_dmac:
+	if (nfc->dmac)
+		dma_release_channel(nfc->dmac);
 out_mod_clk_unprepare:
 	clk_disable_unprepare(nfc->mod_clk);
 out_ahb_clk_unprepare:
@@ -1906,6 +2220,8 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
 	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
 
 	sunxi_nand_chips_cleanup(nfc);
+	if (nfc->dmac)
+		dma_release_channel(nfc->dmac);
 	clk_disable_unprepare(nfc->mod_clk);
 	clk_disable_unprepare(nfc->ahb_clk);
 
-- 
2.5.0


* [PATCH v3 2/2] mtd: nand: sunxi: update DT bindings
  2016-04-15 13:10 [PATCH v3 1/2] mtd: nand: sunxi: add support for DMA assisted operations Boris Brezillon
@ 2016-04-15 13:10 ` Boris Brezillon
  2016-04-18 17:02   ` Maxime Ripard
  2016-06-06 16:28 ` [PATCH v3 1/2] mtd: nand: sunxi: add support for DMA assisted operations Boris Brezillon
  1 sibling, 1 reply; 4+ messages in thread
From: Boris Brezillon @ 2016-04-15 13:10 UTC (permalink / raw)
  To: Boris Brezillon, Richard Weinberger, linux-mtd
  Cc: David Woodhouse, Brian Norris, Maxime Ripard, Chen-Yu Tsai,
	linux-sunxi

Document dmas and dma-names properties.

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Acked-by: Rob Herring <robh@kernel.org>
---
 Documentation/devicetree/bindings/mtd/sunxi-nand.txt | 4 ++++
 1 file changed, 4 insertions(+)
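
A board or SoC .dtsi fragment using these properties could look like the
sketch below (node name, register address, interrupt and DMA specifier
cells are illustrative placeholders; SUN4I_DMA_DEDICATED assumes
dt-bindings/dma/sun4i-a10.h, and the actual channel number depends on the
SoC's DMA controller binding):

	nand: nand@01c03000 {
		compatible = "allwinner,sun4i-a10-nand";
		reg = <0x01c03000 0x1000>;
		interrupts = <37>;
		clocks = <&ahb_gates 13>, <&nand_clk>;
		clock-names = "ahb", "mod";
		dmas = <&dma SUN4I_DMA_DEDICATED 3>;
		dma-names = "rxtx";
	};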

diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index 086d6f4..6fdf8f6 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -11,6 +11,10 @@ Required properties:
     * "ahb" : AHB gating clock
     * "mod" : nand controller clock
 
+Optional properties:
+- dmas : shall reference the DMA channel associated with the NAND controller.
+- dma-names : shall be "rxtx".
+
 Optional children nodes:
 Children nodes represent the available nand chips.
 
-- 
2.5.0


* Re: [PATCH v3 2/2] mtd: nand: sunxi: update DT bindings
  2016-04-15 13:10 ` [PATCH v3 2/2] mtd: nand: sunxi: update DT bindings Boris Brezillon
@ 2016-04-18 17:02   ` Maxime Ripard
  0 siblings, 0 replies; 4+ messages in thread
From: Maxime Ripard @ 2016-04-18 17:02 UTC (permalink / raw)
  To: Boris Brezillon
  Cc: Richard Weinberger, linux-mtd, David Woodhouse, Brian Norris,
	Chen-Yu Tsai, linux-sunxi

On Fri, Apr 15, 2016 at 03:10:31PM +0200, Boris Brezillon wrote:
> Document dmas and dma-names properties.
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> Acked-by: Rob Herring <robh@kernel.org>

Acked-by: Maxime Ripard <maxime.ripard@free-electrons.com>

Thanks!
Maxime

-- 
Maxime Ripard, Free Electrons
Embedded Linux, Kernel and Android engineering
http://free-electrons.com


* Re: [PATCH v3 1/2] mtd: nand: sunxi: add support for DMA assisted operations
  2016-04-15 13:10 [PATCH v3 1/2] mtd: nand: sunxi: add support for DMA assisted operations Boris Brezillon
  2016-04-15 13:10 ` [PATCH v3 2/2] mtd: nand: sunxi: update DT bindings Boris Brezillon
@ 2016-06-06 16:28 ` Boris Brezillon
  1 sibling, 0 replies; 4+ messages in thread
From: Boris Brezillon @ 2016-06-06 16:28 UTC (permalink / raw)
  To: Boris Brezillon, Richard Weinberger, linux-mtd
  Cc: Maxime Ripard, Brian Norris, David Woodhouse, Chen-Yu Tsai,
	linux-sunxi

On Fri, 15 Apr 2016 15:10:30 +0200
Boris Brezillon <boris.brezillon@free-electrons.com> wrote:

> The sunxi NAND controller can only pipeline ECC operations when operated
> in DMA mode, which greatly improves NAND throughput while keeping CPU
> usage low.

Applied both.

-- 
Boris Brezillon, Free Electrons
Embedded Linux and Kernel engineering
http://free-electrons.com
