* [PATCH] mmc: dw_mmc: Add support for pre_req and post_req
@ 2012-01-20 4:05 Seungwon Jeon
2012-01-20 7:11 ` Jaehoon Chung
0 siblings, 1 reply; 4+ messages in thread
From: Seungwon Jeon @ 2012-01-20 4:05 UTC (permalink / raw)
To: linux-mmc; +Cc: 'Chris Ball'
This patch implements pre_req and post_req in dw_mmc
to support asynchronous mmc requests.
Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
---
NOTE:
Performance gains are as follows:
sequential read and write improve by 23% and 5%, respectively.
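For reference, the expected gain comes from overlapping dma_map_sg() of the
next request with the transfer that is still in flight. The sketch below only
illustrates the assumed calling order on the core side; it is not the actual
core code, the function name is made up, and completion/host_cookie handling
is omitted:

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Illustration only: pre_req maps a request's sg list ahead of time, so by
 * calling it for the next request while the current one is still on the bus,
 * dma_map_sg() is taken off the critical path; post_req unmaps afterwards. */
static void issue_overlapped(struct mmc_host *mmc,
                             struct mmc_request *cur,
                             struct mmc_request *next)
{
        if (mmc->ops->pre_req)
                mmc->ops->pre_req(mmc, cur, true);      /* map cur */

        mmc->ops->request(mmc, cur);    /* start cur, completes asynchronously */

        if (next && mmc->ops->pre_req)
                mmc->ops->pre_req(mmc, next, false);    /* map next while cur runs */

        /* ... wait for cur to complete ... */

        if (mmc->ops->post_req)
                mmc->ops->post_req(mmc, cur, 0);        /* unmap cur */
}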
drivers/mmc/host/dw_mmc.c | 136 ++++++++++++++++++++++++++++++++++++---------
1 files changed, 109 insertions(+), 27 deletions(-)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 0e34279..1fb8114 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -297,14 +297,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
}
#ifdef CONFIG_MMC_DW_IDMAC
+static int dw_mci_get_dma_dir(struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_WRITE)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ if (!data->host_cookie)
+ dma_unmap_sg(&host->pdev->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
@@ -420,26 +430,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
return 0;
}
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
- .init = dw_mci_idmac_init,
- .start = dw_mci_idmac_start_dma,
- .stop = dw_mci_idmac_stop_dma,
- .complete = dw_mci_idmac_complete_dma,
- .cleanup = dw_mci_dma_cleanup,
-};
-#endif /* CONFIG_MMC_DW_IDMAC */
-
-static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+ struct mmc_data *data,
+ bool next)
{
struct scatterlist *sg;
- unsigned int i, direction, sg_len;
- u32 temp;
-
- host->using_dma = 0;
+ unsigned int i, sg_len;
- /* If we don't have a channel, we can't do DMA */
- if (!host->use_dma)
- return -ENODEV;
+ if (!next && data->host_cookie)
+ return data->host_cookie;
/*
* We don't do DMA on "complex" transfers, i.e. with
@@ -448,6 +447,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
*/
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
return -EINVAL;
+
if (data->blksz & 3)
return -EINVAL;
@@ -456,15 +456,95 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
return -EINVAL;
}
- host->using_dma = 1;
+ sg_len = dma_map_sg(&host->pdev->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+ if (sg_len == 0)
+ return -EINVAL;
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
+ if (next)
+ data->host_cookie = sg_len;
+
+ return sg_len;
+}
- sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
- direction);
+static void dw_mci_pre_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ if (data->host_cookie) {
+ data->host_cookie = 0;
+ return;
+ }
+
+ if (slot->host->use_dma) {
+ if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
+ data->host_cookie = 0;
+ }
+}
+
+static void dw_mci_post_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ int err)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ if (slot->host->use_dma) {
+ if (data->host_cookie)
+ dma_unmap_sg(&slot->host->pdev->dev,
+ data->sg,
+ data->sg_len,
+ dw_mci_get_dma_dir(data));
+ data->host_cookie = 0;
+ }
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+ .init = dw_mci_idmac_init,
+ .start = dw_mci_idmac_start_dma,
+ .stop = dw_mci_idmac_stop_dma,
+ .complete = dw_mci_idmac_complete_dma,
+ .cleanup = dw_mci_dma_cleanup,
+};
+#else
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+ struct mmc_data *data,
+ bool next)
+{
+ return -ENOSYS;
+}
+
+#define dw_mci_pre_req NULL
+#define dw_mci_post_req NULL
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+ int sg_len;
+ u32 temp;
+
+ host->using_dma = 0;
+
+ /* If we don't have a channel, we can't do DMA */
+ if (!host->use_dma)
+ return -ENODEV;
+
+ sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+ if (sg_len < 0)
+ return sg_len;
+
+ host->using_dma = 1;
dev_vdbg(&host->pdev->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
@@ -795,6 +875,8 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
+ .pre_req = dw_mci_pre_req,
+ .post_req = dw_mci_post_req,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
--
1.7.0.4
* Re: [PATCH] mmc: dw_mmc: Add support for pre_req and post_req
2012-01-20 4:05 [PATCH] mmc: dw_mmc: Add support for pre_req and post_req Seungwon Jeon
@ 2012-01-20 7:11 ` Jaehoon Chung
2012-01-25 5:12 ` Seungwon Jeon
0 siblings, 1 reply; 4+ messages in thread
From: Jaehoon Chung @ 2012-01-20 7:11 UTC (permalink / raw)
To: Seungwon Jeon; +Cc: linux-mmc, 'Chris Ball'
Hi Mr Jeon..
Are pre_req and post_req used only with the IDMAC?
On 01/20/2012 01:05 PM, Seungwon Jeon wrote:
> This patch implements pre_req and post_req in dw_mmc
> to support asynchronous mmc requests.
>
> Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
> ---
> NOTE:
> Performance gains are as follows:
> sequential read and write improve by 23% and 5%, respectively.
>
> drivers/mmc/host/dw_mmc.c | 136 ++++++++++++++++++++++++++++++++++++---------
> 1 files changed, 109 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> index 0e34279..1fb8114 100644
> --- a/drivers/mmc/host/dw_mmc.c
> +++ b/drivers/mmc/host/dw_mmc.c
> @@ -297,14 +297,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
> }
>
> #ifdef CONFIG_MMC_DW_IDMAC
> +static int dw_mci_get_dma_dir(struct mmc_data *data)
> +{
> + if (data->flags & MMC_DATA_WRITE)
> + return DMA_TO_DEVICE;
> + else
> + return DMA_FROM_DEVICE;
> +}
> +
> static void dw_mci_dma_cleanup(struct dw_mci *host)
> {
> struct mmc_data *data = host->data;
>
> if (data)
> - dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
> - ((data->flags & MMC_DATA_WRITE)
> - ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
> + if (!data->host_cookie)
> + dma_unmap_sg(&host->pdev->dev,
> + data->sg,
> + data->sg_len,
> + dw_mci_get_dma_dir(data));
> }
if (!data->host_cookie)
dma_unmap_sg(&host->pdev->dev,
data->sg, data->sg_len,
dw_mci_get_dma_dir(data));
The same applies to the similar code below.
Best Regards,
Jaehoon Chung
[...]
* RE: [PATCH] mmc: dw_mmc: Add support for pre_req and post_req
2012-01-20 7:11 ` Jaehoon Chung
@ 2012-01-25 5:12 ` Seungwon Jeon
2012-01-25 8:17 ` Jaehoon Chung
0 siblings, 1 reply; 4+ messages in thread
From: Seungwon Jeon @ 2012-01-25 5:12 UTC (permalink / raw)
To: 'Jaehoon Chung'; +Cc: linux-mmc, 'Chris Ball'
Jaehoon Chung <jh80.chung@samsung.com> wrote:
> Hi Mr Jeon..
>
> Are pre_req and post_req used only with the IDMAC?
Currently only the internal DMA (IDMAC) is considered.
Are you using the system DMA?
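When CONFIG_MMC_DW_IDMAC is not set, dw_mci_pre_req/dw_mci_post_req are
defined as NULL, so the core side only needs to skip the optional hooks.
A rough sketch of that assumption (function name made up, not the actual
core code):

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Sketch only: the hooks are optional, so a host that leaves them NULL
 * keeps the old synchronous mapping path. */
static void start_request(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool is_first_req)
{
        if (mmc->ops->pre_req)
                mmc->ops->pre_req(mmc, mrq, is_first_req);
        mmc->ops->request(mmc, mrq);
}

In that case dw_mci_pre_dma_transfer() is the -ENOSYS stub, so
dw_mci_submit_data_dma() fails and the driver falls back to PIO as before.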
>
> On 01/20/2012 01:05 PM, Seungwon Jeon wrote:
>
> > This patch implements pre_req and post_req in dw_mmc
> > to support asynchronous mmc requests.
> >
> > Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
> > ---
> > NOTE:
> > Performance gains are as follows:
> > sequential read and write improve by 23% and 5%, respectively.
> >
> > drivers/mmc/host/dw_mmc.c | 136 ++++++++++++++++++++++++++++++++++++---------
> > 1 files changed, 109 insertions(+), 27 deletions(-)
> >
> > diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
> > index 0e34279..1fb8114 100644
> > --- a/drivers/mmc/host/dw_mmc.c
> > +++ b/drivers/mmc/host/dw_mmc.c
> > @@ -297,14 +297,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
> > }
> >
> > #ifdef CONFIG_MMC_DW_IDMAC
> > +static int dw_mci_get_dma_dir(struct mmc_data *data)
> > +{
> > + if (data->flags & MMC_DATA_WRITE)
> > + return DMA_TO_DEVICE;
> > + else
> > + return DMA_FROM_DEVICE;
> > +}
> > +
> > static void dw_mci_dma_cleanup(struct dw_mci *host)
> > {
> > struct mmc_data *data = host->data;
> >
> > if (data)
> > - dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
> > - ((data->flags & MMC_DATA_WRITE)
> > - ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
> > + if (!data->host_cookie)
> > + dma_unmap_sg(&host->pdev->dev,
> > + data->sg,
> > + data->sg_len,
> > + dw_mci_get_dma_dir(data));
> > }
>
> if (!data->host_cookie)
> dma_unmap_sg(&host->pdev->dev,
> data->sg, data->sg_len,
> dw_mci_get_dma_dir(data));
> The same applies to the similar code below.
Sorry... what do you mean?
Thanks,
Seungwon Jeon.
[...]
* Re: [PATCH] mmc: dw_mmc: Add support for pre_req and post_req
2012-01-25 5:12 ` Seungwon Jeon
@ 2012-01-25 8:17 ` Jaehoon Chung
0 siblings, 0 replies; 4+ messages in thread
From: Jaehoon Chung @ 2012-01-25 8:17 UTC (permalink / raw)
To: Seungwon Jeon; +Cc: 'Jaehoon Chung', linux-mmc, 'Chris Ball'
On 01/25/2012 02:12 PM, Seungwon Jeon wrote:
> Jaehoon Chung <jh80.chung@samsung.com> wrote:
>> Hi Mr Jeon..
>>
>> Are pre_req and post_req used only with the IDMAC?
> Currently only the internal DMA (IDMAC) is considered.
> Are you using the system DMA?
I used the IDMAC.
I mean... another DMA system could also use this feature, right?
That's why I asked.
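As far as I can see, the mapping part itself is not tied to the IDMAC:
pre_req/post_req only touch dma_map_sg()/dma_unmap_sg() and host_cookie.
So something like the helper below (rough idea only, name made up, reusing
dw_mci_get_dma_dir() from this patch) could in principle be shared by
another DMA backend:

/* Rough idea only: map the sg list and report its length, independent of
 * which DMA controller later consumes the mapping. */
static int dw_mci_map_for_dma(struct dw_mci *host, struct mmc_data *data)
{
        int sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
                                dw_mci_get_dma_dir(data));

        return sg_len ? sg_len : -EINVAL;
}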
Best Regards,
Jaehoon Chung
[...]