From: Wolfram Sang <wsa+renesas@sang-engineering.com>
To: linux-mmc@vger.kernel.org
Cc: linux-renesas-soc@vger.kernel.org,
Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>,
Wolfram Sang <wsa+renesas@sang-engineering.com>
Subject: [PATCH RFC 1/6] mmc: renesas_sdhi: remove accessor function for internal_dmac
Date: Thu, 6 Oct 2022 21:04:47 +0200
Message-ID: <20221006190452.5316-2-wsa+renesas@sang-engineering.com>
In-Reply-To: <20221006190452.5316-1-wsa+renesas@sang-engineering.com>

This accessor function does not help readability; it makes it worse.
Because I will soon need to read from these registers as well and do
not want to add another wrapper like this, I chose to remove the
existing one and access the registers directly. I also switch from
writeq to writel because no 64-bit register is actually involved.
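
To make the pattern explicit, here is a condensed before/after sketch
for one of the writes (distilled from the diff below, using the
DM_CM_INFO1 write as the example; this is not additional driver code):

  /* before: a wrapper that obscures the access and uses a 64-bit
   * write although the register is only 32 bits wide
   */
  static void
  renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
                                      int addr, u64 val)
  {
          writeq(val, host->ctl + addr);
  }

  renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1, INFO1_CLEAR);

  /* after: a direct 32-bit write to the same register */
  writel(INFO1_CLEAR, host->ctl + DM_CM_INFO1);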
Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
---
drivers/mmc/host/renesas_sdhi_internal_dmac.c | 31 +++++--------------
1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 42937596c4c4..48bea0bd75e8 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -279,13 +279,6 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
 
-static void
-renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
-				    int addr, u64 val)
-{
-	writeq(val, host->ctl + addr);
-}
-
 static void
 renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
 {
@@ -295,8 +288,7 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
 		return;
 
 	if (!enable)
-		renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
-						    INFO1_CLEAR);
+		writel(INFO1_CLEAR, host->ctl + DM_CM_INFO1);
 
 	if (priv->dma_priv.enable)
 		priv->dma_priv.enable(host, enable);
@@ -309,10 +301,8 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host)
 
 	renesas_sdhi_internal_dmac_enable_dma(host, false);
 
-	renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
-					    RST_RESERVED_BITS & ~val);
-	renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
-					    RST_RESERVED_BITS | val);
+	writel(RST_RESERVED_BITS & ~val, host->ctl + DM_CM_RST);
+	writel(RST_RESERVED_BITS | val, host->ctl + DM_CM_RST);
 
 	clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
 
@@ -397,10 +387,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 	renesas_sdhi_internal_dmac_enable_dma(host, true);
 
 	/* set dma parameters */
-	renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_MODE,
-					    dtran_mode);
-	renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
-					    sg_dma_address(sg));
+	writel(dtran_mode, host->ctl + DM_CM_DTRAN_MODE);
+	writel(sg_dma_address(sg), host->ctl + DM_DTRAN_ADDR);
 
 	host->dma_on = true;
 
@@ -420,8 +408,7 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
 	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 
 	/* start the DMAC */
-	renesas_sdhi_internal_dmac_dm_write(host, DM_CM_DTRAN_CTRL,
-					    DTRAN_CTRL_DM_START);
+	writel(DTRAN_CTRL_DM_START, host->ctl + DM_CM_DTRAN_CTRL);
 }
 
 static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
@@ -502,10 +489,8 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
 	struct renesas_sdhi *priv = host_to_priv(host);
 
 	/* Disable DMAC interrupts, we don't use them */
-	renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
-					    INFO1_MASK_CLEAR);
-	renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
-					    INFO2_MASK_CLEAR);
+	writel(INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
+	writel(INFO2_MASK_CLEAR, host->ctl + DM_CM_INFO2_MASK);
 
 	/* Each value is set to non-zero to assume "enabling" each DMA */
 	host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
--
2.35.1
Thread overview: 18+ messages
2022-10-06 19:04 [PATCH RFC 0/6] mmc: renesas_sdhi: add support for DMA end irqs Wolfram Sang
2022-10-06 19:04 ` Wolfram Sang [this message]
2022-10-06 19:04 ` [PATCH RFC 2/6] mmc: renesas_sdhi: improve naming of DMA struct Wolfram Sang
2022-10-06 19:04 ` [PATCH RFC 3/6] mmc: tmio: add callback for dma irq Wolfram Sang
2022-10-06 19:04 ` [PATCH RFC 4/6] mmc: renesas_sdhi: add quirk for broken register layout Wolfram Sang
2022-10-06 19:04 ` [PATCH RFC 5/6] mmc: renesas_sdhi: take DMA end interrupts into account Wolfram Sang
2022-10-07 7:45 ` Geert Uytterhoeven
2022-10-07 8:13 ` Wolfram Sang
2022-10-06 19:04 ` [PATCH 6/6] DEBUG mmc: renesas_sdhi: debug end_flags for DMA Wolfram Sang
2022-10-25 10:51 ` [PATCH RFC 0/6] mmc: renesas_sdhi: add support for DMA end irqs Ulf Hansson
2022-10-25 11:04 ` Arnd Bergmann
2022-10-25 12:32 ` Ulf Hansson
2022-11-02 19:14 ` Wolfram Sang
2022-11-03 15:04 ` Ulf Hansson
2022-11-03 18:23 ` Wolfram Sang
2022-11-18 0:49 ` Yoshihiro Shimoda
2022-11-18 20:55 ` Wolfram Sang
2022-11-18 9:45 ` Ulf Hansson