From: Rob Clark <robdclark@gmail.com>
To: dri-devel@lists.freedesktop.org
Cc: freedreno@lists.freedesktop.org, linux-arm-msm@vger.kernel.org,
Connor Abbott <cwabbott0@gmail.com>,
Rob Clark <robdclark@chromium.org>, Will Deacon <will@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Joerg Roedel <joro@8bytes.org>, Jason Gunthorpe <jgg@ziepe.ca>,
Kevin Tian <kevin.tian@intel.com>,
Nicolin Chen <nicolinc@nvidia.com>,
Joao Martins <joao.m.martins@oracle.com>,
linux-arm-kernel@lists.infradead.org (moderated list:ARM SMMU
DRIVERS), iommu@lists.linux.dev (open list:IOMMU SUBSYSTEM),
linux-kernel@vger.kernel.org (open list)
Subject: [PATCH v4 03/33] iommu/io-pgtable-arm: Add quirk to quiet WARN_ON()
Date: Fri, 2 May 2025 09:56:30 -0700 [thread overview]
Message-ID: <20250502165831.44850-4-robdclark@gmail.com> (raw)
In-Reply-To: <20250502165831.44850-1-robdclark@gmail.com>
From: Rob Clark <robdclark@chromium.org>
In situations where mapping/unmapping sequence can be controlled by
userspace, attempting to map over a region that has not yet been
unmapped is an error. But not something that should spam dmesg.
Now that there is a quirk, we can also drop the selftest_running
flag, and use the quirk instead for selftests.
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
drivers/iommu/io-pgtable-arm.c | 27 ++++++++++++++-------------
include/linux/io-pgtable.h | 8 ++++++++
2 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f27965caf6a1..a535d88f8943 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -253,8 +253,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
(data->start_level == 1) && (oas == 40);
}
-static bool selftest_running = false;
-
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
return (dma_addr_t)virt_to_phys(pages);
@@ -373,7 +371,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
for (i = 0; i < num_entries; i++)
if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
return -EEXIST;
} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
/*
@@ -475,7 +473,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
cptep = iopte_deref(pte, data);
} else if (pte) {
/* We require an unmap first */
- WARN_ON(!selftest_running);
+ WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
return -EEXIST;
}
@@ -649,8 +647,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
ptep += unmap_idx_start;
pte = READ_ONCE(*ptep);
- if (WARN_ON(!pte))
- return 0;
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
+ return -ENOENT;
+ }
/* If the size matches this level, we're in the right place */
if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
@@ -660,8 +660,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* Find and handle non-leaf entries */
for (i = 0; i < num_entries; i++) {
pte = READ_ONCE(ptep[i]);
- if (WARN_ON(!pte))
+ if (!pte) {
+ WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN_ON));
break;
+ }
if (!iopte_leaf(pte, lvl, iop->fmt)) {
__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
@@ -976,7 +978,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
- IO_PGTABLE_QUIRK_ARM_HD))
+ IO_PGTABLE_QUIRK_ARM_HD |
+ IO_PGTABLE_QUIRK_NO_WARN_ON))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1079,7 +1082,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
+ IO_PGTABLE_QUIRK_NO_WARN_ON))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1320,7 +1324,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
- selftest_running = false; \
-EFAULT; \
})
@@ -1336,8 +1339,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
size_t size, mapped;
struct io_pgtable_ops *ops;
- selftest_running = true;
-
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
@@ -1426,7 +1427,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
free_io_pgtable_ops(ops);
}
- selftest_running = false;
return 0;
}
@@ -1448,6 +1448,7 @@ static int __init arm_lpae_do_selftests(void)
.tlb = &dummy_tlb_ops,
.coherent_walk = true,
.iommu_dev = &dev,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN_ON,
};
/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index bba2a51c87d2..639b8f4fb87d 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -88,6 +88,13 @@ struct io_pgtable_cfg {
*
* IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
* IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits
+ *
+ * IO_PGTABLE_QUIRK_NO_WARN_ON: Do not WARN_ON() on conflicting
+ * mappings, but silently return -EEXIST. Normally an attempt
+ * to map over an existing mapping would indicate some sort of
+ * kernel bug, which would justify the WARN_ON(). But for GPU
+ * drivers, this could be under control of userspace. Which
+ * deserves an error return, but not to spam dmesg.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
@@ -97,6 +104,7 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
#define IO_PGTABLE_QUIRK_ARM_HD BIT(7)
#define IO_PGTABLE_QUIRK_ARM_S2FWB BIT(8)
+ #define IO_PGTABLE_QUIRK_NO_WARN_ON BIT(9)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
--
2.49.0
next prev parent reply other threads:[~2025-05-02 17:07 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-02 16:56 [PATCH v4 00/33] drm/msm: sparse / "VM_BIND" support Rob Clark
2025-05-02 16:56 ` [PATCH v4 01/33] drm/gpuvm: Don't require obj lock in destructor path Rob Clark
2025-05-02 16:56 ` [PATCH v4 02/33] drm/gpuvm: Allow VAs to hold soft reference to BOs Rob Clark
2025-05-02 16:56 ` Rob Clark [this message]
2025-05-02 19:29 ` [PATCH v4 03/33] iommu/io-pgtable-arm: Add quirk to quiet WARN_ON() ALOK TIWARI
2025-05-02 16:56 ` [PATCH v4 04/33] drm/msm: Rename msm_file_private -> msm_context Rob Clark
2025-05-02 16:56 ` [PATCH v4 05/33] drm/msm: Improve msm_context comments Rob Clark
2025-05-02 16:56 ` [PATCH v4 06/33] drm/msm: Rename msm_gem_address_space -> msm_gem_vm Rob Clark
2025-05-02 16:56 ` [PATCH v4 07/33] drm/msm: Remove vram carveout support Rob Clark
2025-05-02 16:56 ` [PATCH v4 08/33] drm/msm: Collapse vma allocation and initialization Rob Clark
2025-05-02 16:56 ` [PATCH v4 09/33] drm/msm: Collapse vma close and delete Rob Clark
2025-05-02 16:56 ` [PATCH v4 10/33] drm/msm: Don't close VMAs on purge Rob Clark
2025-05-02 16:56 ` [PATCH v4 11/33] drm/msm: drm_gpuvm conversion Rob Clark
2025-05-02 16:56 ` [PATCH v4 12/33] drm/msm: Convert vm locking Rob Clark
2025-05-02 16:56 ` [PATCH v4 13/33] drm/msm: Use drm_gpuvm types more Rob Clark
2025-05-02 16:56 ` [PATCH v4 14/33] drm/msm: Split out helper to get iommu prot flags Rob Clark
2025-05-02 16:56 ` [PATCH v4 15/33] drm/msm: Add mmu support for non-zero offset Rob Clark
2025-05-02 16:56 ` [PATCH v4 16/33] drm/msm: Add PRR support Rob Clark
2025-05-02 16:56 ` [PATCH v4 17/33] drm/msm: Rename msm_gem_vma_purge() -> _unmap() Rob Clark
2025-05-02 16:56 ` [PATCH v4 18/33] drm/msm: Lazily create context VM Rob Clark
2025-05-02 16:56 ` [PATCH v4 19/33] drm/msm: Add opt-in for VM_BIND Rob Clark
2025-05-02 16:56 ` [PATCH v4 20/33] drm/msm: Mark VM as unusable on GPU hangs Rob Clark
2025-05-02 16:56 ` [PATCH v4 21/33] drm/msm: Add _NO_SHARE flag Rob Clark
2025-05-05 7:54 ` Christian König
2025-05-05 14:15 ` Rob Clark
2025-05-05 15:17 ` Christian König
2025-05-02 16:56 ` [PATCH v4 22/33] drm/msm: Crashdump prep for sparse mappings Rob Clark
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250502165831.44850-4-robdclark@gmail.com \
--to=robdclark@gmail.com \
--cc=cwabbott0@gmail.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=freedreno@lists.freedesktop.org \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=joao.m.martins@oracle.com \
--cc=joro@8bytes.org \
--cc=kevin.tian@intel.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-arm-msm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=nicolinc@nvidia.com \
--cc=robdclark@chromium.org \
--cc=robin.murphy@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox