From: Lucas Stach <l.stach@pengutronix.de>
To: etnaviv@lists.freedesktop.org
Cc: patchwork-lst@pengutronix.de, kernel@pengutronix.de,
dri-devel@lists.freedesktop.org,
Russell King <linux+etnaviv@armlinux.org.uk>
Subject: [PATCH v2 1/2] drm/etnaviv: switch MMU page tables to writecombine memory
Date: Tue, 8 May 2018 15:23:15 +0200
Message-ID: <20180508132316.22018-1-l.stach@pengutronix.de>

Switch the MMU page table allocations from coherent to writecombined
memory. We are likely to write multiple page table entries at once and
already ensure proper write buffer flushing before GPU submit, so this
improves CPU time usage in the submit path without any downsides.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
v2: use shorter _wc function names.
---
drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 34 +++++-----
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 74 ++++++++++------------
2 files changed, 49 insertions(+), 59 deletions(-)
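For reference, a minimal sketch of the writecombine allocation pattern
this patch switches to. The struct and helper functions below are
illustrative only and not part of the driver; only dma_alloc_wc(),
dma_free_wc(), wmb() and the usual size/gfp constants are real kernel
APIs:

/*
 * Illustrative sketch, not part of the patch: allocate a page table
 * from writecombined memory, write entries through the (buffered)
 * WC mapping, then drain the write buffer before the GPU may walk
 * the table -- the driver already does this on the submit path.
 */
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct example_pgtable {		/* hypothetical container */
	u32 *cpu;			/* CPU virtual address (writecombined) */
	dma_addr_t dma;			/* device address handed to the GPU MMU */
};

static int example_pgtable_alloc(struct device *dev, struct example_pgtable *pt)
{
	pt->cpu = dma_alloc_wc(dev, SZ_4K, &pt->dma, GFP_KERNEL);
	if (!pt->cpu)
		return -ENOMEM;

	return 0;
}

static void example_pgtable_write(struct example_pgtable *pt,
				  unsigned int index, u32 pte)
{
	/* Stores may be combined in the CPU write buffer; cheap per entry. */
	pt->cpu[index] = pte;
}

static void example_before_gpu_submit(void)
{
	/*
	 * Drain the write buffer so all page table entry writes are
	 * visible to the GPU before the submit is kicked off.
	 */
	wmb();
}

static void example_pgtable_free(struct device *dev, struct example_pgtable *pt)
{
	dma_free_wc(dev, SZ_4K, pt->cpu, pt->dma);
}
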
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 4b9b11ca6f03..4ada19054443 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -47,11 +47,10 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
u32 *p;
int i;
- etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
- etnaviv_domain->base.dev,
- SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
+ etnaviv_domain->base.bad_page_cpu =
+ dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+ &etnaviv_domain->base.bad_page_dma,
+ GFP_KERNEL);
if (!etnaviv_domain->base.bad_page_cpu)
return -ENOMEM;
@@ -59,14 +58,14 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
- etnaviv_domain->pgtable_cpu =
- dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
- &etnaviv_domain->pgtable_dma,
- GFP_KERNEL);
+ etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+ PT_SIZE,
+ &etnaviv_domain->pgtable_dma,
+ GFP_KERNEL);
if (!etnaviv_domain->pgtable_cpu) {
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
return -ENOMEM;
}
@@ -81,13 +80,12 @@ static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(domain);
- dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
- etnaviv_domain->pgtable_cpu,
- etnaviv_domain->pgtable_dma);
+ dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
+ etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
kfree(etnaviv_domain);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 9752dbd5d28b..47785d61cd95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -104,11 +104,10 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
int ret, i, j;
/* allocate scratch page */
- etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
- etnaviv_domain->base.dev,
- SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
+ etnaviv_domain->base.bad_page_cpu =
+ dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+ &etnaviv_domain->base.bad_page_dma,
+ GFP_KERNEL);
if (!etnaviv_domain->base.bad_page_cpu) {
ret = -ENOMEM;
goto fail_mem;
@@ -117,19 +116,17 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
- etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
- SZ_4K,
- &etnaviv_domain->pta_dma,
- GFP_KERNEL);
+ etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+ SZ_4K, &etnaviv_domain->pta_dma,
+ GFP_KERNEL);
if (!etnaviv_domain->pta_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
- etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
- SZ_4K,
- &etnaviv_domain->mtlb_dma,
- GFP_KERNEL);
+ etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+ SZ_4K, &etnaviv_domain->mtlb_dma,
+ GFP_KERNEL);
if (!etnaviv_domain->mtlb_cpu) {
ret = -ENOMEM;
goto fail_mem;
@@ -138,10 +135,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
/* pre-populate STLB pages (may want to switch to on-demand later) */
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
etnaviv_domain->stlb_cpu[i] =
- dma_alloc_coherent(etnaviv_domain->base.dev,
- SZ_4K,
- &etnaviv_domain->stlb_dma[i],
- GFP_KERNEL);
+ dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+ &etnaviv_domain->stlb_dma[i],
+ GFP_KERNEL);
if (!etnaviv_domain->stlb_cpu[i]) {
ret = -ENOMEM;
goto fail_mem;
@@ -158,25 +154,23 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
fail_mem:
if (etnaviv_domain->base.bad_page_cpu)
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
if (etnaviv_domain->pta_cpu)
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->pta_cpu,
- etnaviv_domain->pta_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
if (etnaviv_domain->mtlb_cpu)
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->mtlb_cpu,
- etnaviv_domain->mtlb_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->stlb_cpu[i],
- etnaviv_domain->stlb_dma[i]);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->stlb_cpu[i],
+ etnaviv_domain->stlb_dma[i]);
}
return ret;
@@ -188,23 +182,21 @@ static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
to_etnaviv_domain(domain);
int i;
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->pta_cpu,
- etnaviv_domain->pta_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->mtlb_cpu,
- etnaviv_domain->mtlb_dma);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
- dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->stlb_cpu[i],
- etnaviv_domain->stlb_dma[i]);
+ dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->stlb_cpu[i],
+ etnaviv_domain->stlb_dma[i]);
}
vfree(etnaviv_domain);
--
2.17.0