linux-crypto.vger.kernel.org archive mirror
From: Simon Baatz <gmbnomis@gmail.com>
To: phil.sutter@viprinet.com
Cc: linux-crypto@vger.kernel.org, cloudy.linux@gmail.com
Subject: [PATCH 1/1] mv_dma: mv_cesa: fixes for clock init
Date: Tue, 26 Jun 2012 22:31:52 +0200	[thread overview]
Message-ID: <1340742712-5821-2-git-send-email-gmbnomis@gmail.com> (raw)
In-Reply-To: <1340742712-5821-1-git-send-email-gmbnomis@gmail.com>

mv_dma tries to access CESA engine registers before the CESA clock is
enabled.  Move the clock enable code up so that the clock is already running
before the first register access.

Additionally, neither mv_dma nor mv_cesa disabled the clock if something
went wrong during init; add the missing clock cleanup to their error paths.
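
For illustration, the resulting ordering in a probe/init routine looks
roughly like the following (a minimal sketch only; the function, label and
variable names are made up and not taken from the drivers):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *reg;
	struct clk *clk;
	int rc;

	/* Enable the gate clock first; clk_get() failing is not an error,
	   since not every platform exposes a gatable clock. */
	clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);

	/* Only once the clock is running is it safe to touch registers. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		rc = -ENXIO;
		goto out_disable_clk;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		rc = -ENOMEM;
		goto out_disable_clk;
	}

	/* ... remaining init; any later failure also jumps below ... */
	return 0;

out_disable_clk:
	/* Undo the clock setup on every error path. */
	if (!IS_ERR(clk)) {
		clk_disable_unprepare(clk);
		clk_put(clk);
	}
	return rc;
}

The actual changes below follow this shape in mv_dma's mv_init_engine() and
add the corresponding clock cleanup to mv_cesa's probe error path.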

Signed-off-by: Simon Baatz <gmbnomis@gmail.com>
---
 drivers/crypto/mv_cesa.c |    7 ++++++-
 drivers/crypto/mv_dma.c  |   44 +++++++++++++++++++++++++++++---------------
 2 files changed, 35 insertions(+), 16 deletions(-)

diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index b75fdf5..aa05567 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1308,7 +1308,8 @@ static int mv_probe(struct platform_device *pdev)
 		ret = -ENOMEM;
 		goto err_mapping;
 	}
-	if (set_dma_desclist_size(&cpg->desclist, MV_DMA_INIT_POOLSIZE)) {
+	ret = set_dma_desclist_size(&cpg->desclist, MV_DMA_INIT_POOLSIZE);
+	if (ret) {
 		printk(KERN_ERR MV_CESA "failed to initialise poolsize\n");
 		goto err_pool;
 	}
@@ -1350,6 +1351,10 @@ err_mapping:
 	dma_unmap_single(&pdev->dev, cpg->sa_sram_dma,
 			sizeof(struct sec_accel_sram), DMA_TO_DEVICE);
 	free_irq(irq, cp);
+	if (!IS_ERR(cp->clk)) {
+		clk_disable_unprepare(cp->clk);
+		clk_put(cp->clk);
+	}
 err_thread:
 	kthread_stop(cp->queue_th);
 err_unmap_sram:
diff --git a/drivers/crypto/mv_dma.c b/drivers/crypto/mv_dma.c
index dd1ce02..9440fbc 100644
--- a/drivers/crypto/mv_dma.c
+++ b/drivers/crypto/mv_dma.c
@@ -350,23 +350,39 @@ static int mv_init_engine(struct platform_device *pdev, u32 ctrl_init_val,
 	tpg.dev = &pdev->dev;
 	tpg.print_and_clear_irq = pc_irq;
 
+	/* Not all platforms can gate the clock, so it is not
+	   an error if the clock does not exist. */
+	tpg.clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(tpg.clk))
+		clk_prepare_enable(tpg.clk);
+
 	/* setup address decoding */
 	res = platform_get_resource_byname(pdev,
 			IORESOURCE_MEM, "regs deco");
-	if (!res)
-		return -ENXIO;
-	if (!(deco = ioremap(res->start, resource_size(res))))
-		return -ENOMEM;
+	if (!res) {
+		rc = -ENXIO;
+		goto out_disable_clk;
+	}
+	deco = ioremap(res->start, resource_size(res));
+	if (!deco) {
+		rc = -ENOMEM;
+		goto out_disable_clk;
+	}
 	setup_mbus_windows(deco, pdev->dev.platform_data, win_setter);
 	iounmap(deco);
 
 	/* get register start address */
 	res = platform_get_resource_byname(pdev,
 			IORESOURCE_MEM, "regs control and error");
-	if (!res)
-		return -ENXIO;
-	if (!(tpg.reg = ioremap(res->start, resource_size(res))))
-		return -ENOMEM;
+	if (!res) {
+		rc = -ENXIO;
+		goto out_disable_clk;
+	}
+	tpg.reg = ioremap(res->start, resource_size(res));
+	if (!tpg.reg) {
+		rc = -ENOMEM;
+		goto out_disable_clk;
+	}
 
 	/* get the IRQ */
 	tpg.irq = platform_get_irq(pdev, 0);
@@ -375,12 +391,6 @@ static int mv_init_engine(struct platform_device *pdev, u32 ctrl_init_val,
 		goto out_unmap_reg;
 	}
 
-	/* Not all platforms can gate the clock, so it is not
-	   an error if the clock does not exists. */
-	tpg.clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(tpg.clk))
-		clk_prepare_enable(tpg.clk);
-
 	/* initialise DMA descriptor list */
 	if (init_dma_desclist(&tpg.desclist, tpg.dev,
 			sizeof(struct mv_dma_desc), MV_DMA_ALIGN, 0)) {
@@ -421,6 +431,11 @@ out_free_desclist:
 	fini_dma_desclist(&tpg.desclist);
 out_unmap_reg:
 	iounmap(tpg.reg);
+out_disable_clk:
+	if (!IS_ERR(tpg.clk)) {
+		clk_disable_unprepare(tpg.clk);
+		clk_put(tpg.clk);
+	}
 	tpg.dev = NULL;
 	return rc;
 }
@@ -517,4 +532,3 @@ module_exit(mv_dma_exit);
 MODULE_AUTHOR("Phil Sutter <phil.sutter@viprinet.com>");
 MODULE_DESCRIPTION("Support for Marvell's IDMA/TDMA engines");
 MODULE_LICENSE("GPL");
-
-- 
1.7.9.5

Thread overview: 67+ messages
2012-05-25 16:08 RFC: support for MV_CESA with TDMA Phil Sutter
2012-05-25 16:08 ` [PATCH 01/13] mv_cesa: do not use scatterlist iterators Phil Sutter
2012-05-25 16:08 ` [PATCH 02/13] mv_cesa: minor formatting cleanup, will all make sense soon Phil Sutter
2012-05-25 16:08 ` [PATCH 03/13] mv_cesa: prepare the full sram config in dram Phil Sutter
2012-05-25 16:08 ` [PATCH 04/13] mv_cesa: split up processing callbacks Phil Sutter
2012-05-25 16:08 ` [PATCH 05/13] add a driver for the Marvell TDMA engine Phil Sutter
2012-05-25 16:08 ` [PATCH 06/13] mv_cesa: use TDMA engine for data transfers Phil Sutter
2012-05-25 16:08 ` [PATCH 07/13] mv_cesa: have TDMA copy back the digest result Phil Sutter
2012-05-25 16:08 ` [PATCH 08/13] mv_cesa: fetch extra_bytes via TDMA engine, too Phil Sutter
2012-05-25 16:08 ` [PATCH 09/13] mv_cesa: implementing packet chain mode, only aes for now Phil Sutter
2012-05-25 16:08 ` [PATCH 10/13] mv_cesa: reorganise mv_start_new_hash_req a bit Phil Sutter
2012-05-25 16:08 ` [PATCH 11/13] mv_cesa: implement descriptor chaining for hashes, too Phil Sutter
2012-05-25 16:08 ` [PATCH 12/13] mv_cesa: drop the now unused process callback Phil Sutter
2012-05-25 16:08 ` [PATCH 13/13] mv_cesa, mv_tdma: outsource common dma-pool handling code Phil Sutter
2012-05-27 14:03 ` RFC: support for MV_CESA with TDMA cloudy.linux
2012-05-29 11:34   ` Phil Sutter
2012-06-12 10:04 ` Herbert Xu
2012-06-12 10:24   ` Phil Sutter
2012-06-12 11:39     ` Herbert Xu
2012-06-12 17:17       ` RFC: support for MV_CESA with IDMA or TDMA Phil Sutter
2012-06-12 17:17         ` [PATCH 01/13] mv_cesa: do not use scatterlist iterators Phil Sutter
2012-06-12 17:17         ` [PATCH 02/13] mv_cesa: minor formatting cleanup, will all make sense soon Phil Sutter
2012-06-12 17:17         ` [PATCH 03/13] mv_cesa: prepare the full sram config in dram Phil Sutter
2012-06-12 17:17         ` [PATCH 04/13] mv_cesa: split up processing callbacks Phil Sutter
2012-06-12 17:17         ` [PATCH 05/13] add a driver for the Marvell IDMA/TDMA engines Phil Sutter
2012-06-12 17:17         ` [PATCH 06/13] mv_cesa: use DMA engine for data transfers Phil Sutter
2012-06-12 17:17         ` [PATCH 07/13] mv_cesa: have DMA engine copy back the digest result Phil Sutter
2012-06-12 17:17         ` [PATCH 08/13] mv_cesa: fetch extra_bytes via DMA engine, too Phil Sutter
2012-06-12 17:17         ` [PATCH 09/13] mv_cesa: implementing packet chain mode, only aes for now Phil Sutter
2012-06-12 17:17         ` [PATCH 10/13] mv_cesa: reorganise mv_start_new_hash_req a bit Phil Sutter
2012-06-12 17:17         ` [PATCH 11/13] mv_cesa: implement descriptor chaining for hashes, too Phil Sutter
2012-06-12 17:17         ` [PATCH 12/13] mv_cesa: drop the now unused process callback Phil Sutter
2012-06-12 17:17         ` [PATCH 13/13] mv_cesa, mv_dma: outsource common dma-pool handling code Phil Sutter
2012-06-15  1:40         ` RFC: support for MV_CESA with IDMA or TDMA cloudy.linux
2012-06-15  9:51           ` Phil Sutter
2012-06-16  0:20         ` [PATCH 0/2] Fixes " Simon Baatz
2012-06-16  0:20           ` [PATCH 1/2] mv_dma: fix mv_init_engine() error case Simon Baatz
2012-06-16  0:20           ` [PATCH 2/2] ARM: Orion: mv_dma: Add support for clk Simon Baatz
2012-06-18 13:47           ` [PATCH 0/2] Fixes for MV_CESA with IDMA or TDMA Phil Sutter
2012-06-18 20:12             ` Simon Baatz
2012-06-19 11:51               ` Phil Sutter
2012-06-19 15:09                 ` cloudy.linux
2012-06-19 17:13                   ` Phil Sutter
2012-06-20  1:16                     ` cloudy.linux
2012-07-16  9:32                       ` Andrew Lunn
2012-07-16 13:52                         ` Phil Sutter
2012-07-16 14:03                           ` Andrew Lunn
2012-07-16 14:53                             ` Phil Sutter
2012-07-16 17:32                               ` Simon Baatz
2012-07-16 17:59                                 ` Andrew Lunn
2012-06-20 13:31               ` cloudy.linux
2012-06-20 15:41                 ` Phil Sutter
2012-06-25 13:40                   ` Phil Sutter
2012-06-25 14:25                     ` cloudy.linux
2012-06-25 14:36                       ` Phil Sutter
2012-06-25 16:05                       ` cloudy.linux
2012-06-25 21:59                         ` Phil Sutter
2012-06-26 11:24                           ` cloudy.linux
2012-06-30  7:35                           ` cloudy.linux
2012-07-06 15:30                             ` Phil Sutter
2012-07-08  5:38                               ` cloudy.linux
2012-07-09 12:54                                 ` Phil Sutter
2012-07-31 12:12                                   ` cloudy.linux
2012-10-23 17:11                                     ` Phil Sutter
2012-06-26 20:31             ` [PATCH 0/1] MV_CESA with DMA: Clk init fixes Simon Baatz
2012-06-26 20:31               ` Simon Baatz [this message]
2012-07-06 15:05               ` Phil Sutter
