linux-mmc.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2] mmc: dw_mmc: Add support for pre_req and post_req
@ 2012-01-26  7:32 Seungwon Jeon
  2012-01-26  7:40 ` Jaehoon Chung
  2012-02-05  0:02 ` Chris Ball
  0 siblings, 2 replies; 3+ messages in thread
From: Seungwon Jeon @ 2012-01-26  7:32 UTC (permalink / raw)
  To: linux-mmc; +Cc: 'Chris Ball', 'Jaehoon Chung'

This patch implements pre_req and post_req in dw_mmc
to support asynchronous mmc request.

Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
---
Changes in v2:
	Consider system DMA case as well as IDMAC.

NOTE:
    Performance gains are as follows:
    sequential read and write improve by 23% and 5%, respectively.

 drivers/mmc/host/dw_mmc.c |  129 +++++++++++++++++++++++++++++++++++---------
 1 files changed, 102 insertions(+), 27 deletions(-)

diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 0e34279..5c91acb 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -297,14 +297,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
 }
 
 #ifdef CONFIG_MMC_DW_IDMAC
+static int dw_mci_get_dma_dir(struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_WRITE)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_FROM_DEVICE;
+}
+
 static void dw_mci_dma_cleanup(struct dw_mci *host)
 {
 	struct mmc_data *data = host->data;
 
 	if (data)
-		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
-			     ((data->flags & MMC_DATA_WRITE)
-			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		if (!data->host_cookie)
+			dma_unmap_sg(&host->pdev->dev,
+				     data->sg,
+				     data->sg_len,
+				     dw_mci_get_dma_dir(data));
 }
 
 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
@@ -420,26 +430,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
 	return 0;
 }
 
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
-	.init = dw_mci_idmac_init,
-	.start = dw_mci_idmac_start_dma,
-	.stop = dw_mci_idmac_stop_dma,
-	.complete = dw_mci_idmac_complete_dma,
-	.cleanup = dw_mci_dma_cleanup,
-};
-#endif /* CONFIG_MMC_DW_IDMAC */
-
-static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+				   struct mmc_data *data,
+				   bool next)
 {
 	struct scatterlist *sg;
-	unsigned int i, direction, sg_len;
-	u32 temp;
-
-	host->using_dma = 0;
+	unsigned int i, sg_len;
 
-	/* If we don't have a channel, we can't do DMA */
-	if (!host->use_dma)
-		return -ENODEV;
+	if (!next && data->host_cookie)
+		return data->host_cookie;
 
 	/*
 	 * We don't do DMA on "complex" transfers, i.e. with
@@ -448,6 +447,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 	 */
 	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 		return -EINVAL;
+
 	if (data->blksz & 3)
 		return -EINVAL;
 
@@ -456,15 +456,88 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 			return -EINVAL;
 	}
 
-	host->using_dma = 1;
+	sg_len = dma_map_sg(&host->pdev->dev,
+			    data->sg,
+			    data->sg_len,
+			    dw_mci_get_dma_dir(data));
+	if (sg_len == 0)
+		return -EINVAL;
 
-	if (data->flags & MMC_DATA_READ)
-		direction = DMA_FROM_DEVICE;
-	else
-		direction = DMA_TO_DEVICE;
+	if (next)
+		data->host_cookie = sg_len;
+
+	return sg_len;
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+	.init = dw_mci_idmac_init,
+	.start = dw_mci_idmac_start_dma,
+	.stop = dw_mci_idmac_stop_dma,
+	.complete = dw_mci_idmac_complete_dma,
+	.cleanup = dw_mci_dma_cleanup,
+};
+#else
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+				   struct mmc_data *data,
+				   bool next)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static void dw_mci_pre_req(struct mmc_host *mmc,
+			   struct mmc_request *mrq,
+			   bool is_first_req)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (!slot->host->use_dma || !data)
+		return;
+
+	if (data->host_cookie) {
+		data->host_cookie = 0;
+		return;
+	}
+
+	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
+		data->host_cookie = 0;
+}
+
+static void dw_mci_post_req(struct mmc_host *mmc,
+			    struct mmc_request *mrq,
+			    int err)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (!slot->host->use_dma || !data)
+		return;
+
+	if (data->host_cookie)
+		dma_unmap_sg(&slot->host->pdev->dev,
+				data->sg,
+				data->sg_len,
+				dw_mci_get_dma_dir(data));
+	data->host_cookie = 0;
+}
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+	int sg_len;
+	u32 temp;
 
-	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
-			    direction);
+	host->using_dma = 0;
+
+	/* If we don't have a channel, we can't do DMA */
+	if (!host->use_dma)
+		return -ENODEV;
+
+	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+	if (sg_len < 0)
+		return sg_len;
+
+	host->using_dma = 1;
 
 	dev_vdbg(&host->pdev->dev,
 		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
@@ -795,6 +868,8 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
 
 static const struct mmc_host_ops dw_mci_ops = {
 	.request		= dw_mci_request,
+	.pre_req		= dw_mci_pre_req,
+	.post_req		= dw_mci_post_req,
 	.set_ios		= dw_mci_set_ios,
 	.get_ro			= dw_mci_get_ro,
 	.get_cd			= dw_mci_get_cd,
-- 
1.7.0.4



^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] mmc: dw_mmc: Add support for pre_req and post_req
  2012-01-26  7:32 [PATCH v2] mmc: dw_mmc: Add support for pre_req and post_req Seungwon Jeon
@ 2012-01-26  7:40 ` Jaehoon Chung
  2012-02-05  0:02 ` Chris Ball
  1 sibling, 0 replies; 3+ messages in thread
From: Jaehoon Chung @ 2012-01-26  7:40 UTC (permalink / raw)
  To: Seungwon Jeon; +Cc: linux-mmc, 'Chris Ball', 'Jaehoon Chung'

This patch provides a measurable performance benefit.
Looks good to me.

Tested-by: Jaehoon Chung <jh80.chung@samsung.com>

On 01/26/2012 04:32 PM, Seungwon Jeon wrote:

> This patch implements pre_req and post_req in dw_mmc
> to support asynchronous mmc request.
> 
> Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
> ---
> Changes in v2:
> 	Consider system DMA case as well as IDMAC.
> 
> NOTE:
>     Performance gains the following.
>     Sequential read and write improve 23% and 5% respectively.
> 
>  drivers/mmc/host/dw_mmc.c |  129 +++++++++++++++++++++++++++++++++++---------
>  1 files changed, 102 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 0e34279..5c91acb 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -297,14 +297,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
>  }
>  
>  #ifdef CONFIG_MMC_DW_IDMAC
> +static int dw_mci_get_dma_dir(struct mmc_data *data)
> +{
> +	if (data->flags & MMC_DATA_WRITE)
> +		return DMA_TO_DEVICE;
> +	else
> +		return DMA_FROM_DEVICE;
> +}
> +
>  static void dw_mci_dma_cleanup(struct dw_mci *host)
>  {
>  	struct mmc_data *data = host->data;
>  
>  	if (data)
> -		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
> -			     ((data->flags & MMC_DATA_WRITE)
> -			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
> +		if (!data->host_cookie)
> +			dma_unmap_sg(&host->pdev->dev,
> +				     data->sg,
> +				     data->sg_len,
> +				     dw_mci_get_dma_dir(data));
>  }
>  
>  static void dw_mci_idmac_stop_dma(struct dw_mci *host)
> @@ -420,26 +430,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
>  	return 0;
>  }
>  
> -static struct dw_mci_dma_ops dw_mci_idmac_ops = {
> -	.init = dw_mci_idmac_init,
> -	.start = dw_mci_idmac_start_dma,
> -	.stop = dw_mci_idmac_stop_dma,
> -	.complete = dw_mci_idmac_complete_dma,
> -	.cleanup = dw_mci_dma_cleanup,
> -};
> -#endif /* CONFIG_MMC_DW_IDMAC */
> -
> -static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
> +static int dw_mci_pre_dma_transfer(struct dw_mci *host,
> +				   struct mmc_data *data,
> +				   bool next)
>  {
>  	struct scatterlist *sg;
> -	unsigned int i, direction, sg_len;
> -	u32 temp;
> -
> -	host->using_dma = 0;
> +	unsigned int i, sg_len;
>  
> -	/* If we don't have a channel, we can't do DMA */
> -	if (!host->use_dma)
> -		return -ENODEV;
> +	if (!next && data->host_cookie)
> +		return data->host_cookie;
>  
>  	/*
>  	 * We don't do DMA on "complex" transfers, i.e. with
> @@ -448,6 +447,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
>  	 */
>  	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
>  		return -EINVAL;
> +
>  	if (data->blksz & 3)
>  		return -EINVAL;
>  
> @@ -456,15 +456,88 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
>  			return -EINVAL;
>  	}
>  
> -	host->using_dma = 1;
> +	sg_len = dma_map_sg(&host->pdev->dev,
> +			    data->sg,
> +			    data->sg_len,
> +			    dw_mci_get_dma_dir(data));
> +	if (sg_len == 0)
> +		return -EINVAL;
>  
> -	if (data->flags & MMC_DATA_READ)
> -		direction = DMA_FROM_DEVICE;
> -	else
> -		direction = DMA_TO_DEVICE;
> +	if (next)
> +		data->host_cookie = sg_len;
> +
> +	return sg_len;
> +}
> +
> +static struct dw_mci_dma_ops dw_mci_idmac_ops = {
> +	.init = dw_mci_idmac_init,
> +	.start = dw_mci_idmac_start_dma,
> +	.stop = dw_mci_idmac_stop_dma,
> +	.complete = dw_mci_idmac_complete_dma,
> +	.cleanup = dw_mci_dma_cleanup,
> +};
> +#else
> +static int dw_mci_pre_dma_transfer(struct dw_mci *host,
> +				   struct mmc_data *data,
> +				   bool next)
> +{
> +	return -ENOSYS;
> +}
> +#endif /* CONFIG_MMC_DW_IDMAC */
> +
> +static void dw_mci_pre_req(struct mmc_host *mmc,
> +			   struct mmc_request *mrq,
> +			   bool is_first_req)
> +{
> +	struct dw_mci_slot *slot = mmc_priv(mmc);
> +	struct mmc_data *data = mrq->data;
> +
> +	if (!slot->host->use_dma || !data)
> +		return;
> +
> +	if (data->host_cookie) {
> +		data->host_cookie = 0;
> +		return;
> +	}
> +
> +	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
> +		data->host_cookie = 0;
> +}
> +
> +static void dw_mci_post_req(struct mmc_host *mmc,
> +			    struct mmc_request *mrq,
> +			    int err)
> +{
> +	struct dw_mci_slot *slot = mmc_priv(mmc);
> +	struct mmc_data *data = mrq->data;
> +
> +	if (!slot->host->use_dma || !data)
> +		return;
> +
> +	if (data->host_cookie)
> +		dma_unmap_sg(&slot->host->pdev->dev,
> +				data->sg,
> +				data->sg_len,
> +				dw_mci_get_dma_dir(data));
> +	data->host_cookie = 0;
> +}
> +
> +static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
> +{
> +	int sg_len;
> +	u32 temp;
>  
> -	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
> -			    direction);
> +	host->using_dma = 0;
> +
> +	/* If we don't have a channel, we can't do DMA */
> +	if (!host->use_dma)
> +		return -ENODEV;
> +
> +	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
> +	if (sg_len < 0)
> +		return sg_len;
> +
> +	host->using_dma = 1;
>  
>  	dev_vdbg(&host->pdev->dev,
>  		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
> @@ -795,6 +868,8 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
>  
>  static const struct mmc_host_ops dw_mci_ops = {
>  	.request		= dw_mci_request,
> +	.pre_req		= dw_mci_pre_req,
> +	.post_req		= dw_mci_post_req,
>  	.set_ios		= dw_mci_set_ios,
>  	.get_ro			= dw_mci_get_ro,
>  	.get_cd			= dw_mci_get_cd,



^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH v2] mmc: dw_mmc: Add support for pre_req and post_req
  2012-01-26  7:32 [PATCH v2] mmc: dw_mmc: Add support for pre_req and post_req Seungwon Jeon
  2012-01-26  7:40 ` Jaehoon Chung
@ 2012-02-05  0:02 ` Chris Ball
  1 sibling, 0 replies; 3+ messages in thread
From: Chris Ball @ 2012-02-05  0:02 UTC (permalink / raw)
  To: Seungwon Jeon
  Cc: linux-mmc, 'Jaehoon Chung', Will Newton, James Hogan

Hi,

Adding Will and James for their review/ACK.  Thanks,

- Chris.

On Thu, Jan 26 2012, Seungwon Jeon wrote:
> This patch implements pre_req and post_req in dw_mmc
> to support asynchronous mmc request.
>
> Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
> ---
> Changes in v2:
> 	Consider system DMA case as well as IDMAC.
>
> NOTE:
>     Performance gains the following.
>     Sequential read and write improve 23% and 5% respectively.
>
>  drivers/mmc/host/dw_mmc.c |  129 +++++++++++++++++++++++++++++++++++---------
>  1 files changed, 102 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 0e34279..5c91acb 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -297,14 +297,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
>  }
>  
>  #ifdef CONFIG_MMC_DW_IDMAC
> +static int dw_mci_get_dma_dir(struct mmc_data *data)
> +{
> +	if (data->flags & MMC_DATA_WRITE)
> +		return DMA_TO_DEVICE;
> +	else
> +		return DMA_FROM_DEVICE;
> +}
> +
>  static void dw_mci_dma_cleanup(struct dw_mci *host)
>  {
>  	struct mmc_data *data = host->data;
>  
>  	if (data)
> -		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
> -			     ((data->flags & MMC_DATA_WRITE)
> -			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
> +		if (!data->host_cookie)
> +			dma_unmap_sg(&host->pdev->dev,
> +				     data->sg,
> +				     data->sg_len,
> +				     dw_mci_get_dma_dir(data));
>  }
>  
>  static void dw_mci_idmac_stop_dma(struct dw_mci *host)
> @@ -420,26 +430,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
>  	return 0;
>  }
>  
> -static struct dw_mci_dma_ops dw_mci_idmac_ops = {
> -	.init = dw_mci_idmac_init,
> -	.start = dw_mci_idmac_start_dma,
> -	.stop = dw_mci_idmac_stop_dma,
> -	.complete = dw_mci_idmac_complete_dma,
> -	.cleanup = dw_mci_dma_cleanup,
> -};
> -#endif /* CONFIG_MMC_DW_IDMAC */
> -
> -static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
> +static int dw_mci_pre_dma_transfer(struct dw_mci *host,
> +				   struct mmc_data *data,
> +				   bool next)
>  {
>  	struct scatterlist *sg;
> -	unsigned int i, direction, sg_len;
> -	u32 temp;
> -
> -	host->using_dma = 0;
> +	unsigned int i, sg_len;
>  
> -	/* If we don't have a channel, we can't do DMA */
> -	if (!host->use_dma)
> -		return -ENODEV;
> +	if (!next && data->host_cookie)
> +		return data->host_cookie;
>  
>  	/*
>  	 * We don't do DMA on "complex" transfers, i.e. with
> @@ -448,6 +447,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
>  	 */
>  	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
>  		return -EINVAL;
> +
>  	if (data->blksz & 3)
>  		return -EINVAL;
>  
> @@ -456,15 +456,88 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
>  			return -EINVAL;
>  	}
>  
> -	host->using_dma = 1;
> +	sg_len = dma_map_sg(&host->pdev->dev,
> +			    data->sg,
> +			    data->sg_len,
> +			    dw_mci_get_dma_dir(data));
> +	if (sg_len == 0)
> +		return -EINVAL;
>  
> -	if (data->flags & MMC_DATA_READ)
> -		direction = DMA_FROM_DEVICE;
> -	else
> -		direction = DMA_TO_DEVICE;
> +	if (next)
> +		data->host_cookie = sg_len;
> +
> +	return sg_len;
> +}
> +
> +static struct dw_mci_dma_ops dw_mci_idmac_ops = {
> +	.init = dw_mci_idmac_init,
> +	.start = dw_mci_idmac_start_dma,
> +	.stop = dw_mci_idmac_stop_dma,
> +	.complete = dw_mci_idmac_complete_dma,
> +	.cleanup = dw_mci_dma_cleanup,
> +};
> +#else
> +static int dw_mci_pre_dma_transfer(struct dw_mci *host,
> +				   struct mmc_data *data,
> +				   bool next)
> +{
> +	return -ENOSYS;
> +}
> +#endif /* CONFIG_MMC_DW_IDMAC */
> +
> +static void dw_mci_pre_req(struct mmc_host *mmc,
> +			   struct mmc_request *mrq,
> +			   bool is_first_req)
> +{
> +	struct dw_mci_slot *slot = mmc_priv(mmc);
> +	struct mmc_data *data = mrq->data;
> +
> +	if (!slot->host->use_dma || !data)
> +		return;
> +
> +	if (data->host_cookie) {
> +		data->host_cookie = 0;
> +		return;
> +	}
> +
> +	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
> +		data->host_cookie = 0;
> +}
> +
> +static void dw_mci_post_req(struct mmc_host *mmc,
> +			    struct mmc_request *mrq,
> +			    int err)
> +{
> +	struct dw_mci_slot *slot = mmc_priv(mmc);
> +	struct mmc_data *data = mrq->data;
> +
> +	if (!slot->host->use_dma || !data)
> +		return;
> +
> +	if (data->host_cookie)
> +		dma_unmap_sg(&slot->host->pdev->dev,
> +				data->sg,
> +				data->sg_len,
> +				dw_mci_get_dma_dir(data));
> +	data->host_cookie = 0;
> +}
> +
> +static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
> +{
> +	int sg_len;
> +	u32 temp;
>  
> -	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
> -			    direction);
> +	host->using_dma = 0;
> +
> +	/* If we don't have a channel, we can't do DMA */
> +	if (!host->use_dma)
> +		return -ENODEV;
> +
> +	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
> +	if (sg_len < 0)
> +		return sg_len;
> +
> +	host->using_dma = 1;
>  
>  	dev_vdbg(&host->pdev->dev,
>  		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
> @@ -795,6 +868,8 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
>  
>  static const struct mmc_host_ops dw_mci_ops = {
>  	.request		= dw_mci_request,
> +	.pre_req		= dw_mci_pre_req,
> +	.post_req		= dw_mci_post_req,
>  	.set_ios		= dw_mci_set_ios,
>  	.get_ro			= dw_mci_get_ro,
>  	.get_cd			= dw_mci_get_cd,

-- 
Chris Ball   <cjb@laptop.org>   <http://printf.net/>
One Laptop Per Child

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2012-02-05  0:02 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-01-26  7:32 [PATCH v2] mmc: dw_mmc: Add support for pre_req and post_req Seungwon Jeon
2012-01-26  7:40 ` Jaehoon Chung
2012-02-05  0:02 ` Chris Ball

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).