From: Thierry Reding <thierry.reding@gmail.com>
To: Ben Skeggs <bskeggs@redhat.com>,
	Thierry Reding <thierry.reding@gmail.com>
Cc: linux-tegra@vger.kernel.org, nouveau@lists.freedesktop.org,
	dri-devel@lists.freedesktop.org
Subject: [PATCH 09/11] drm/nouveau: tegra: Fall back to 32-bit DMA mask without IOMMU
Date: Mon, 16 Sep 2019 17:04:10 +0200
Message-ID: <20190916150412.10025-10-thierry.reding@gmail.com>
In-Reply-To: <20190916150412.10025-1-thierry.reding@gmail.com>

From: Thierry Reding <treding@nvidia.com>

The GPU can usually address more than 32 bits, even without being
attached to an IOMMU. However, if the GPU is not attached to an IOMMU,
it is likely that there is no IOMMU in the system at all, in which case
any buffers allocated by Nouveau will likely end up in a region of
memory that host1x cannot access. Fall back to a 32-bit DMA mask in
that case so that such buffers remain addressable by host1x.
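
For illustration only (not part of the applied diff below), the shape of
the change can be sketched as follows; example_set_dma_mask(),
iommu_ret and iommu_bit are hypothetical stand-ins for the driver's own
helpers and fields:

  #include <linux/device.h>
  #include <linux/dma-mapping.h>

  /*
   * Hypothetical helper, for illustration: pick the DMA mask depending
   * on whether IOMMU setup succeeded. "iommu_ret" stands in for the
   * return value of nvkm_device_tegra_probe_iommu() and "iommu_bit"
   * for tdev->func->iommu_bit.
   */
  static int example_set_dma_mask(struct device *dev, int iommu_ret,
  				unsigned int iommu_bit)
  {
  	if (iommu_ret) {
  		/*
  		 * No usable IOMMU: restrict allocations to the low
  		 * 32 bits so that buffers shared with host1x (e.g.
  		 * the display controller) remain addressable.
  		 */
  		return dma_set_mask(dev, DMA_BIT_MASK(32));
  	}

  	/*
  	 * With an IOMMU, the IOMMU bit bounds the GPU-addressable
  	 * space, so use it as the DMA mask.
  	 */
  	return dma_set_mask(dev, DMA_BIT_MASK(iommu_bit));
  }

Keeping the 32-bit fallback in the caller, as the patch below does,
lets the IOMMU path set the wider mask itself once the IOVA space is
known to be usable.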
Signed-off-by: Thierry Reding <treding@nvidia.com>
---
.../drm/nouveau/nvkm/engine/device/tegra.c | 111 +++++++++++-------
1 file changed, 70 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index fc652aaa41c7..221238a2cf53 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -97,7 +97,7 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
return 0;
}
-static void
+static int
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
@@ -111,47 +111,65 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
* IOMMU.
*/
if (iommu_get_domain_for_dev(dev))
- return;
+ return -ENODEV;
if (!tdev->func->iommu_bit)
- return;
+ return -ENODEV;
+
+ if (!iommu_present(&platform_bus_type))
+ return -ENODEV;
mutex_init(&tdev->iommu.mutex);
- if (iommu_present(&platform_bus_type)) {
- tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (!tdev->iommu.domain)
- goto error;
+ tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tdev->iommu.domain)
+ return -ENOMEM;
- /*
- * A IOMMU is only usable if it supports page sizes smaller
- * or equal to the system's PAGE_SIZE, with a preference if
- * both are equal.
- */
- pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
- if (pgsize_bitmap & PAGE_SIZE) {
- tdev->iommu.pgshift = PAGE_SHIFT;
- } else {
- tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
- if (tdev->iommu.pgshift == 0) {
- dev_warn(dev, "unsupported IOMMU page size\n");
- goto free_domain;
- }
- tdev->iommu.pgshift -= 1;
+ /*
+ * An IOMMU is only usable if it supports page sizes smaller or equal
+ * to the system's PAGE_SIZE, with a preference if both are equal.
+ */
+ pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+ if (pgsize_bitmap & PAGE_SIZE) {
+ tdev->iommu.pgshift = PAGE_SHIFT;
+ } else {
+ tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+ if (tdev->iommu.pgshift == 0) {
+ dev_warn(dev, "unsupported IOMMU page size\n");
+ ret = -ENOTSUPP;
+ goto free_domain;
}
- ret = iommu_attach_device(tdev->iommu.domain, dev);
- if (ret)
- goto free_domain;
+ tdev->iommu.pgshift -= 1;
+ }
- ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
- (1ULL << tdev->func->iommu_bit) >>
- tdev->iommu.pgshift, 1);
- if (ret)
- goto detach_device;
+ ret = iommu_attach_device(tdev->iommu.domain, dev);
+ if (ret) {
+ dev_warn(dev, "failed to attach to IOMMU: %d\n", ret);
+ goto free_domain;
+ }
+
+ ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
+ (1ULL << tdev->func->iommu_bit) >>
+ tdev->iommu.pgshift, 1);
+ if (ret) {
+ dev_warn(dev, "failed to initialize IOVA space: %d\n", ret);
+ goto detach_device;
+ }
+
+ /*
+ * The IOMMU bit defines the upper limit of the GPU-addressable space.
+ */
+ ret = dma_set_mask(dev, DMA_BIT_MASK(tdev->func->iommu_bit));
+ if (ret) {
+ dev_warn(dev, "failed to set DMA mask: %d\n", ret);
+ goto fini_mm;
}
- return;
+ return 0;
+
+fini_mm:
+ nvkm_mm_fini(&tdev->iommu.mm);
detach_device:
iommu_detach_device(tdev->iommu.domain, dev);
@@ -159,10 +177,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
free_domain:
iommu_domain_free(tdev->iommu.domain);
-error:
+ /* reset these so that the DMA API code paths are executed */
tdev->iommu.domain = NULL;
tdev->iommu.pgshift = 0;
- dev_err(dev, "cannot initialize IOMMU MM\n");
+
+ dev_warn(dev, "cannot initialize IOMMU MM\n");
+
+ return ret;
+#else
+ return -ENOTSUPP;
#endif
}
@@ -327,14 +350,20 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto free;
}
- /**
- * The IOMMU bit defines the upper limit of the GPU-addressable space.
- */
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
- if (ret)
- goto free;
-
- nvkm_device_tegra_probe_iommu(tdev);
+ ret = nvkm_device_tegra_probe_iommu(tdev);
+ if (ret) {
+ /*
+ * If we fail to set up an IOMMU, fall back to a 32-bit DMA
+ * mask. This is not necessary for the GPU to work because it
+ * can usually address all of system memory. However, if the
+ * buffers allocated by Nouveau are meant to be shared with
+ * the display controller, we need to restrict where they can
+ * come from.
+ */
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto free;
+ }
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
--
2.23.0