* [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump
2026-04-29 7:20 [PATCH rc v4 0/5] iommu/arm-smmu-v3: Fix device crash on kdump kernel Nicolin Chen
@ 2026-04-29 7:20 ` Nicolin Chen
2026-04-29 21:37 ` Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 2/5] iommu/arm-smmu-v3: Implement is_attach_deferred() " Nicolin Chen
` (3 subsequent siblings)
4 siblings, 1 reply; 9+ messages in thread
From: Nicolin Chen @ 2026-04-29 7:20 UTC (permalink / raw)
To: will, robin.murphy, jgg, kevin.tian
Cc: joro, praan, kees, baolu.lu, miko.lenczewski, smostafa,
linux-arm-kernel, iommu, linux-kernel, stable, jamien
When transitioning to a kdump kernel, the primary kernel might have crashed
while endpoint devices were actively bus-mastering DMA. Currently, the SMMU
driver aggressively resets the hardware during probe by clearing CR0_SMMUEN
and setting the Global Bypass Attribute (GBPA) to ABORT.
In a kdump scenario, this aggressive reset is highly destructive:
a) If GBPA is set to ABORT, in-flight DMA will be aborted, generating fatal
PCIe AER or SErrors that may panic the kdump kernel
b) If GBPA is set to BYPASS, in-flight DMA targeting some IOVAs will bypass
the SMMU and corrupt the physical memory at those 1:1 mapped IOVAs.
To safely absorb in-flight DMAs, a kdump kernel will have to leave SMMUEN=1
intact and avoid modifying STRTAB_BASE, allowing HW to continue translating
in-flight DMAs reusing the crashed kernel's page tables until the endpoint
device drivers probe and quiesce their respective hardware.
However, the ARM SMMUv3 architecture specification states that updating the
SMMU_STRTAB_BASE register while SMMUEN == 1 is UNPREDICTABLE or ignored.
This leaves a kdump kernel no choice but to adopt the stream table from the
crashed kernel.
Introduce ARM_SMMU_OPT_KDUMP_ADOPT and adopt functions memremapping all the
stream tables extracted from STRTAB_BASE and STRTAB_BASE_CFG.
Note that the adoption of the crashed kernel's stream table follows certain
strict rules, since the old stream table might be compromised. Thus, apply
a series of validations against the values read from the registers. If any
address or size doesn't pass the test, it means the stream table cannot be
trusted, so toss it entirely. To avoid OOM due to a deeply corrupted stream
table, the memremap for l2 tables is done on the kdump kernel's demand.
The new option will be set in a following change.
Fixes: b63b3439b856 ("iommu/arm-smmu-v3: Abort all transactions if SMMU is enabled in kdump kernel")
Cc: stable@vger.kernel.org # v6.12+
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1 +
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 304 +++++++++++++++++++-
2 files changed, 302 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index ef42df4753ec4..cd60b692c3901 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -861,6 +861,7 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
#define ARM_SMMU_OPT_TEGRA241_CMDQV (1 << 4)
+#define ARM_SMMU_OPT_KDUMP_ADOPT (1 << 5)
u32 options;
struct arm_smmu_cmdq cmdq;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e8d7dbe495f03..fbc0fa6f182c6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
+#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
@@ -2040,16 +2041,110 @@ static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
}
}
+/*
+ * Adopting the crashed kernel's stream table has risks: the physical addresses
+ * read from ARM_SMMU_STRTAB_BASE / L1 descriptors may be corrupted. Reject any
+ * range that overlaps the kdump kernel's critical regions.
+ */
+static bool arm_smmu_kdump_phys_is_corrupted(phys_addr_t base, size_t size)
+{
+ /* Must NOT overlap kdump kernel's own RAM -- silent corruption */
+ if (region_intersects(base, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) != REGION_DISJOINT)
+ return true;
+
+ /*
+ * Must NOT overlap any MMIO region -- fatal SError
+ *
+ * Note that a false positive is possible on platforms that register the
+ * reserved-memory regions where the crashed kernel's stream table might
+ * legitimately reside. The cost of a false reject will be a fallback to
+ * full reset (recoverable), while a missed MMIO mapping will be fatal.
+ */
+ if (region_intersects(base, size, IORESOURCE_MEM, IORES_DESC_NONE) !=
+ REGION_DISJOINT)
+ return true;
+
+ /*
+ * Note: physical holes are absent from iomem_resource, so a corrupted
+ * address pointing into one will not be caught here. Closing that gap
+ * requires a firmware memory map and is left as a future improvement.
+ */
+ return false;
+}
+
+static int arm_smmu_kdump_adopt_l2_strtab(struct arm_smmu_device *smmu, u32 sid,
+ u32 l1_idx, u64 l2_dma, u32 span,
+ struct arm_smmu_strtab_l2 **l2table)
+{
+ phys_addr_t base = dma_to_phys(smmu->dev, l2_dma);
+ struct arm_smmu_strtab_l2 *table;
+ size_t size;
+
+ /*
+ * Only a coherent SMMU is supported at this moment. For a non-coherent
+ * SMMU that wants to support ARM_SMMU_OPT_KDUMP_ADOPT, try MEMREMAP_WC.
+ */
+ if (WARN_ON(!(smmu->features & ARM_SMMU_FEAT_COHERENCY)))
+ return -EOPNOTSUPP;
+
+ /*
+ * Retest the memremap inputs in case the L1 descriptor was overwritten
+ * since adopt. Reject this master's insert; panic or SMMU-disable would
+ * either lose the vmcore or cascade aborts. Do not try to fix it, as it
+ * would break all other SIDs in the same bus (PCI case). The corruption
+ * blast radius is already bounded to that bus range.
+ */
+ if (span != STRTAB_SPLIT + 1) {
+ dev_err(smmu->dev,
+ "kdump: L1[%u] span %u changed since adopt (was %u)\n",
+ l1_idx, span, STRTAB_SPLIT + 1);
+ return -EINVAL;
+ }
+
+ size = (1UL << (span - 1)) * sizeof(struct arm_smmu_ste);
+ if (arm_smmu_kdump_phys_is_corrupted(base, size)) {
+ dev_err(smmu->dev,
+ "kdump: L1[%u] now points at a corrupt range\n",
+ l1_idx);
+ return -EINVAL;
+ }
+
+ table = devm_memremap(smmu->dev, base, size, MEMREMAP_WB);
+ if (IS_ERR(table)) {
+ dev_err(smmu->dev,
+ "kdump: failed to adopt l2 stream table for SID %u\n",
+ sid);
+ return PTR_ERR(table);
+ }
+
+ *l2table = table;
+ return 0;
+}
+
static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
dma_addr_t l2ptr_dma;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
struct arm_smmu_strtab_l2 **l2table;
+ u32 l1_idx = arm_smmu_strtab_l1_idx(sid);
- l2table = &cfg->l2.l2ptrs[arm_smmu_strtab_l1_idx(sid)];
+ l2table = &cfg->l2.l2ptrs[l1_idx];
if (*l2table)
return 0;
+ /* Deferred adoption of the crashed kernel's L2 table */
+ if (smmu->options & ARM_SMMU_OPT_KDUMP_ADOPT) {
+ /* L1 entry is shared with the SMMU and possibly rogue DMA */
+ u64 l2ptr = le64_to_cpu(READ_ONCE(cfg->l2.l1tab[l1_idx].l2ptr));
+ dma_addr_t l2_dma = l2ptr & STRTAB_L1_DESC_L2PTR_MASK;
+ u32 span = FIELD_GET(STRTAB_L1_DESC_SPAN, l2ptr);
+
+ if (span && l2_dma)
+ return arm_smmu_kdump_adopt_l2_strtab(
+ smmu, sid, l1_idx, l2_dma, span, l2table);
+ }
+
*l2table = dmam_alloc_coherent(smmu->dev, sizeof(**l2table),
&l2ptr_dma, GFP_KERNEL);
if (!*l2table) {
@@ -2061,8 +2156,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_init_initial_stes((*l2table)->stes,
ARRAY_SIZE((*l2table)->stes));
- arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[arm_smmu_strtab_l1_idx(sid)],
- l2ptr_dma);
+ arm_smmu_write_strtab_l1_desc(&cfg->l2.l1tab[l1_idx], l2ptr_dma);
return 0;
}
@@ -4556,10 +4650,213 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
return 0;
}
+static int arm_smmu_kdump_adopt_strtab_2lvl(struct arm_smmu_device *smmu,
+ u32 cfg_reg, dma_addr_t dma)
+{
+ u32 log2size = FIELD_GET(STRTAB_BASE_CFG_LOG2SIZE, cfg_reg);
+ u32 split = FIELD_GET(STRTAB_BASE_CFG_SPLIT, cfg_reg);
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+ phys_addr_t base;
+ u32 num_l1_ents;
+ size_t size;
+ int i;
+
+ /*
+ * Only a coherent SMMU is supported at this moment. For a non-coherent
+ * SMMU that wants to support ARM_SMMU_OPT_KDUMP_ADOPT, try MEMREMAP_WC.
+ */
+ if (WARN_ON(!(smmu->features & ARM_SMMU_FEAT_COHERENCY)))
+ return -EOPNOTSUPP;
+
+ if (log2size < split || log2size > smmu->sid_bits) {
+ dev_err(smmu->dev, "kdump: log2size %u out of range [%u, %u]\n",
+ log2size, split, smmu->sid_bits);
+ return -EINVAL;
+ }
+ if (split != STRTAB_SPLIT) {
+ dev_err(smmu->dev,
+ "kdump: unsupported STRTAB_SPLIT %u (expected %u)\n",
+ split, STRTAB_SPLIT);
+ return -EINVAL;
+ }
+
+ num_l1_ents = 1U << (log2size - split);
+ if (num_l1_ents > STRTAB_MAX_L1_ENTRIES) {
+ dev_err(smmu->dev, "kdump: l1 entries %u exceeds max %u\n",
+ num_l1_ents, STRTAB_MAX_L1_ENTRIES);
+ return -EINVAL;
+ }
+
+ cfg->l2.l1_dma = dma;
+ cfg->l2.num_l1_ents = num_l1_ents;
+
+ base = dma_to_phys(smmu->dev, dma);
+ size = num_l1_ents * sizeof(struct arm_smmu_strtab_l1);
+ if (arm_smmu_kdump_phys_is_corrupted(base, size)) {
+ dev_err(smmu->dev, "kdump: l1 stream table is corrupted\n");
+ return -EINVAL;
+ }
+
+ cfg->l2.l1tab = devm_memremap(smmu->dev, base, size, MEMREMAP_WB);
+ if (IS_ERR(cfg->l2.l1tab))
+ return PTR_ERR(cfg->l2.l1tab);
+
+ cfg->l2.l2ptrs = devm_kcalloc(smmu->dev, num_l1_ents,
+ sizeof(*cfg->l2.l2ptrs), GFP_KERNEL);
+ if (!cfg->l2.l2ptrs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_l1_ents; i++) {
+ u64 l2ptr = le64_to_cpu(cfg->l2.l1tab[i].l2ptr);
+ dma_addr_t l2_dma = l2ptr & STRTAB_L1_DESC_L2PTR_MASK;
+ u32 span = FIELD_GET(STRTAB_L1_DESC_SPAN, l2ptr);
+
+ if (!span || !l2_dma)
+ continue;
+
+ if (span != STRTAB_SPLIT + 1) {
+ dev_err(smmu->dev,
+ "kdump: L1[%u] unsupported span %u (vs %u)\n",
+ i, span, STRTAB_SPLIT + 1);
+ return -EINVAL;
+ }
+
+ base = dma_to_phys(smmu->dev, l2_dma);
+ size = (1UL << (span - 1)) * sizeof(struct arm_smmu_ste);
+ if (arm_smmu_kdump_phys_is_corrupted(base, size)) {
+ dev_err(smmu->dev,
+ "kdump: l2 stream table is corrupted\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the crashed kernel's l1 descriptors are deeply corrupted,
+ * blindly memremapping every l2 table here could lead to OOM.
+ *
+ * Defer the l2 memremap to arm_smmu_init_l2_strtab(), so peak
+ * memory is bounded by the kdump kernel's actual demand.
+ */
+ }
+
+ return 0;
+}
+
+static int arm_smmu_kdump_adopt_strtab_linear(struct arm_smmu_device *smmu,
+ u32 cfg_reg, dma_addr_t dma)
+{
+ u32 log2size = FIELD_GET(STRTAB_BASE_CFG_LOG2SIZE, cfg_reg);
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+ unsigned int max_log2size;
+ phys_addr_t base;
+ size_t size;
+
+ /*
+ * Only a coherent SMMU is supported at this moment. For a non-coherent
+ * SMMU that wants to support ARM_SMMU_OPT_KDUMP_ADOPT, try MEMREMAP_WC.
+ */
+ if (WARN_ON(!(smmu->features & ARM_SMMU_FEAT_COHERENCY)))
+ return -EOPNOTSUPP;
+
+ /* Cap the size at what the kdump kernel itself would have allocated */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
+ max_log2size =
+ ilog2(STRTAB_MAX_L1_ENTRIES * STRTAB_NUM_L2_STES);
+ else
+ max_log2size = smmu->sid_bits;
+
+ /* cfg->linear.num_ents is unsigned int, so cap log2size at 31 */
+ max_log2size = min(max_log2size, 31U);
+ if (log2size > max_log2size) {
+ dev_err(smmu->dev, "kdump: unsupported log2size %u (> %u)\n",
+ log2size, max_log2size);
+ return -EINVAL;
+ }
+
+ /*
+ * We might end up with a num_ents != sid_bits, which is fine. In the
+ * ARM_SMMU_OPT_KDUMP_ADOPT case, arm_smmu_write_strtab() is bypassed.
+ */
+ cfg->linear.num_ents = 1U << log2size;
+ cfg->linear.ste_dma = dma;
+
+ base = dma_to_phys(smmu->dev, dma);
+ size = cfg->linear.num_ents * sizeof(struct arm_smmu_ste);
+ if (arm_smmu_kdump_phys_is_corrupted(base, size)) {
+ dev_err(smmu->dev, "kdump: stream table is corrupted\n");
+ return -EINVAL;
+ }
+
+ cfg->linear.table = devm_memremap(smmu->dev, base, size, MEMREMAP_WB);
+ if (IS_ERR(cfg->linear.table))
+ return PTR_ERR(cfg->linear.table);
+ return 0;
+}
+
+static void arm_smmu_kdump_adopt_cleanup(struct arm_smmu_device *smmu, u32 fmt)
+{
+ struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+
+ if (fmt == STRTAB_BASE_CFG_FMT_2LVL) {
+ if (cfg->l2.l2ptrs)
+ devm_kfree(smmu->dev, cfg->l2.l2ptrs);
+ if (!IS_ERR_OR_NULL(cfg->l2.l1tab))
+ devm_memunmap(smmu->dev, cfg->l2.l1tab);
+ } else if (fmt == STRTAB_BASE_CFG_FMT_LINEAR) {
+ if (!IS_ERR_OR_NULL(cfg->linear.table))
+ devm_memunmap(smmu->dev, cfg->linear.table);
+ }
+}
+
+static int arm_smmu_kdump_adopt_strtab(struct arm_smmu_device *smmu)
+{
+ u32 cfg_reg = readl_relaxed(smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
+ u64 base_reg = readq_relaxed(smmu->base + ARM_SMMU_STRTAB_BASE);
+ u32 fmt = FIELD_GET(STRTAB_BASE_CFG_FMT, cfg_reg);
+ dma_addr_t dma = base_reg & STRTAB_BASE_ADDR_MASK;
+ int ret;
+
+ dev_info(smmu->dev, "kdump: adopting crashed kernel's stream table\n");
+
+ if (fmt == STRTAB_BASE_CFG_FMT_2LVL) {
+ /*
+ * Both kernels run on the same hardware, so it's impossible for
+ * kdump kernel to see the support for linear stream table only.
+ */
+ if (WARN_ON(!(smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)))
+ ret = -EINVAL;
+ else
+ ret = arm_smmu_kdump_adopt_strtab_2lvl(smmu, cfg_reg,
+ dma);
+ } else if (fmt == STRTAB_BASE_CFG_FMT_LINEAR) {
+ /*
+ * In case that the old kernel for some reason used the linear
+ * format, enforce the same format to match the adopted table.
+ */
+ ret = arm_smmu_kdump_adopt_strtab_linear(smmu, cfg_reg, dma);
+ if (!ret)
+ smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+ } else {
+ dev_err(smmu->dev, "kdump: invalid STRTAB format %u\n", fmt);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_warn(smmu->dev, "kdump: falling back to full reset\n");
+ arm_smmu_kdump_adopt_cleanup(smmu, fmt);
+ smmu->options &= ~ARM_SMMU_OPT_KDUMP_ADOPT;
+ memset(&smmu->strtab_cfg, 0, sizeof(smmu->strtab_cfg));
+ }
+ return ret;
+}
+
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
int ret;
+ if ((smmu->options & ARM_SMMU_OPT_KDUMP_ADOPT) &&
+ !arm_smmu_kdump_adopt_strtab(smmu))
+ goto out;
+
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
ret = arm_smmu_init_strtab_2lvl(smmu);
else
@@ -4567,6 +4864,7 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
if (ret)
return ret;
+out:
ida_init(&smmu->vmid_map);
return 0;
--
2.43.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* Re: [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump
2026-04-29 7:20 ` [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump Nicolin Chen
@ 2026-04-29 21:37 ` Nicolin Chen
2026-04-30 11:55 ` Jason Gunthorpe
0 siblings, 1 reply; 9+ messages in thread
From: Nicolin Chen @ 2026-04-29 21:37 UTC (permalink / raw)
To: will, robin.murphy, jgg, kevin.tian
Cc: joro, praan, kees, baolu.lu, miko.lenczewski, smostafa,
linux-arm-kernel, iommu, linux-kernel, stable, jamien
On Wed, Apr 29, 2026 at 12:20:49AM -0700, Nicolin Chen wrote:
> +static int arm_smmu_kdump_adopt_strtab_2lvl(struct arm_smmu_device *smmu,
> + u32 cfg_reg, dma_addr_t dma)
[...]
> + for (i = 0; i < num_l1_ents; i++) {
> + u64 l2ptr = le64_to_cpu(cfg->l2.l1tab[i].l2ptr);
Sashiko pointed out a missing READ_ONCE() here.
And in arm_smmu_is_attach_deferred() too.
I've made a small change to my local v5 tree:
@@ -4337,7 +4337,8 @@ static bool arm_smmu_is_attach_deferred(struct device *dev)
for (i = 0; i < master->num_streams; i++) {
struct arm_smmu_ste *ste =
arm_smmu_get_step_for_sid(smmu, master->streams[i].id);
- u64 ent0 = le64_to_cpu(ste->data[0]);
+ /* Pairing READ_ONCE() with the WRITE_ONCE() in entry_set() */
+ u64 ent0 = le64_to_cpu(READ_ONCE(ste->data[0]));
/* Defer only when there might be in-flight DMAs */
if ((ent0 & STRTAB_STE_0_V) &&
@@ -4747,7 +4748,8 @@ static int arm_smmu_kdump_adopt_strtab_2lvl(struct arm_smmu_device *smmu,
return -ENOMEM;
for (i = 0; i < num_l1_ents; i++) {
- u64 l2ptr = le64_to_cpu(cfg->l2.l1tab[i].l2ptr);
+ /* L1 entry is shared with the SMMU and possibly rogue DMA */
+ u64 l2ptr = le64_to_cpu(READ_ONCE(cfg->l2.l1tab[i].l2ptr));
dma_addr_t l2_dma = l2ptr & STRTAB_L1_DESC_L2PTR_MASK;
u32 span = FIELD_GET(STRTAB_L1_DESC_SPAN, l2ptr);
Given these are two small changes, they shouldn't block community
review. I will wait for a few days before finalizing/sending v5.
Thanks
Nicolin
^ permalink raw reply [flat|nested] 9+ messages in thread

* Re: [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump
2026-04-29 21:37 ` Nicolin Chen
@ 2026-04-30 11:55 ` Jason Gunthorpe
2026-04-30 15:24 ` Nicolin Chen
0 siblings, 1 reply; 9+ messages in thread
From: Jason Gunthorpe @ 2026-04-30 11:55 UTC (permalink / raw)
To: Nicolin Chen
Cc: will, robin.murphy, kevin.tian, joro, praan, kees, baolu.lu,
miko.lenczewski, smostafa, linux-arm-kernel, iommu, linux-kernel,
stable, jamien
On Wed, Apr 29, 2026 at 02:37:50PM -0700, Nicolin Chen wrote:
> On Wed, Apr 29, 2026 at 12:20:49AM -0700, Nicolin Chen wrote:
>
> > +static int arm_smmu_kdump_adopt_strtab_2lvl(struct arm_smmu_device *smmu,
> > + u32 cfg_reg, dma_addr_t dma)
> [...]
> > + for (i = 0; i < num_l1_ents; i++) {
> > + u64 l2ptr = le64_to_cpu(cfg->l2.l1tab[i].l2ptr);
>
> Sashiko pointed out a missing READ_ONCE() here.
??
There is no concurrency at this point?
Jason
^ permalink raw reply [flat|nested] 9+ messages in thread

* Re: [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump
2026-04-30 11:55 ` Jason Gunthorpe
@ 2026-04-30 15:24 ` Nicolin Chen
0 siblings, 0 replies; 9+ messages in thread
From: Nicolin Chen @ 2026-04-30 15:24 UTC (permalink / raw)
To: Jason Gunthorpe
Cc: will, robin.murphy, kevin.tian, joro, praan, kees, baolu.lu,
miko.lenczewski, smostafa, linux-arm-kernel, iommu, linux-kernel,
stable, jamien
On Thu, Apr 30, 2026 at 08:55:13AM -0300, Jason Gunthorpe wrote:
> On Wed, Apr 29, 2026 at 02:37:50PM -0700, Nicolin Chen wrote:
> > On Wed, Apr 29, 2026 at 12:20:49AM -0700, Nicolin Chen wrote:
> >
> > > +static int arm_smmu_kdump_adopt_strtab_2lvl(struct arm_smmu_device *smmu,
> > > + u32 cfg_reg, dma_addr_t dma)
> > [...]
> > > + for (i = 0; i < num_l1_ents; i++) {
> > > + u64 l2ptr = le64_to_cpu(cfg->l2.l1tab[i].l2ptr);
> >
> > Sashiko pointed out a missing READ_ONCE() here.
>
> ??
>
> There is no concurrency at this point?
You are right. I got confused. I was thinking of an RID-sharing
case where some race might happen and adding READ_ONCE could be
defensive.
A kdump kernel has a very limited use. So, this isn't necessary.
Thanks
Nicolin
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH rc v4 2/5] iommu/arm-smmu-v3: Implement is_attach_deferred() for kdump
2026-04-29 7:20 [PATCH rc v4 0/5] iommu/arm-smmu-v3: Fix device crash on kdump kernel Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump Nicolin Chen
@ 2026-04-29 7:20 ` Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 3/5] iommu/arm-smmu-v3: Skip EVTQ/PRIQ setup in kdump kernel Nicolin Chen
` (2 subsequent siblings)
4 siblings, 0 replies; 9+ messages in thread
From: Nicolin Chen @ 2026-04-29 7:20 UTC (permalink / raw)
To: will, robin.murphy, jgg, kevin.tian
Cc: joro, praan, kees, baolu.lu, miko.lenczewski, smostafa,
linux-arm-kernel, iommu, linux-kernel, stable, jamien
Though the kdump kernel adopts the crashed kernel's stream table, the iommu
core will still try to attach each probed device to a default domain, which
overwrites the adopted STE and breaks in-flight DMA from that device.
Implement an is_attach_deferred() callback to prevent this. For each device
that has STE.V=1 and STE.Cfg!=Abort in the adopted table, defer the default
domain attachment, until the device driver explicitly requests it.
Fixes: b63b3439b856 ("iommu/arm-smmu-v3: Abort all transactions if SMMU is enabled in kdump kernel")
Cc: stable@vger.kernel.org # v6.12+
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 24 +++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index fbc0fa6f182c6..27b84688bcc99 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -4309,6 +4309,29 @@ static void arm_smmu_remove_master(struct arm_smmu_master *master)
kfree(master->build_invs);
}
+static bool arm_smmu_is_attach_deferred(struct device *dev)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = master->smmu;
+ int i;
+
+ if (!(smmu->options & ARM_SMMU_OPT_KDUMP_ADOPT))
+ return false;
+
+ for (i = 0; i < master->num_streams; i++) {
+ struct arm_smmu_ste *ste =
+ arm_smmu_get_step_for_sid(smmu, master->streams[i].id);
+ u64 ent0 = le64_to_cpu(ste->data[0]);
+
+ /* Defer only when there might be in-flight DMAs */
+ if ((ent0 & STRTAB_STE_0_V) &&
+ FIELD_GET(STRTAB_STE_0_CFG, ent0) != STRTAB_STE_0_CFG_ABORT)
+ return true;
+ }
+
+ return false;
+}
+
static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
int ret;
@@ -4471,6 +4494,7 @@ static const struct iommu_ops arm_smmu_ops = {
.hw_info = arm_smmu_hw_info,
.domain_alloc_sva = arm_smmu_sva_domain_alloc,
.domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags,
+ .is_attach_deferred = arm_smmu_is_attach_deferred,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
--
2.43.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH rc v4 3/5] iommu/arm-smmu-v3: Skip EVTQ/PRIQ setup in kdump kernel
2026-04-29 7:20 [PATCH rc v4 0/5] iommu/arm-smmu-v3: Fix device crash on kdump kernel Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 1/5] iommu/arm-smmu-v3: Add arm_smmu_kdump_adopt_strtab() for kdump Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 2/5] iommu/arm-smmu-v3: Implement is_attach_deferred() " Nicolin Chen
@ 2026-04-29 7:20 ` Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 4/5] iommu/arm-smmu-v3: Retain CR0_SMMUEN during kdump device reset Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 5/5] iommu/arm-smmu-v3: Detect ARM_SMMU_OPT_KDUMP_ADOPT in probe() Nicolin Chen
4 siblings, 0 replies; 9+ messages in thread
From: Nicolin Chen @ 2026-04-29 7:20 UTC (permalink / raw)
To: will, robin.murphy, jgg, kevin.tian
Cc: joro, praan, kees, baolu.lu, miko.lenczewski, smostafa,
linux-arm-kernel, iommu, linux-kernel, stable, jamien
In kdump cases, the crashed kernel's CDs and page tables can be corrupted,
which could trigger event spamming. Also, we cannot serve page requests.
Skip the EVTQ/PRIQ setup entirely rather than enabling then disabling them.
Skip the IRQ setup and guard their thread functions as well.
Also add some inline comments explaining that.
Fixes: b63b3439b856 ("iommu/arm-smmu-v3: Abort all transactions if SMMU is enabled in kdump kernel")
Cc: stable@vger.kernel.org # v6.12+
Suggested-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 66 +++++++++++++++------
1 file changed, 48 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 27b84688bcc99..17d5e1395e245 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2363,6 +2363,14 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
+ /*
+ * A combined IRQ might call into this function with the queue disabled.
+ * E.g. kdump, where stale HW PROD vs SW CONS would drive a bogus drain
+ * and a CONS write to a disabled queue.
+ */
+ if (!(readl_relaxed(smmu->base + ARM_SMMU_CR0) & CR0_EVTQEN))
+ return IRQ_NONE;
+
do {
while (!queue_remove_raw(q, evt)) {
arm_smmu_decode_event(smmu, evt, &event);
@@ -2431,6 +2439,14 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
struct arm_smmu_ll_queue *llq = &q->llq;
u64 evt[PRIQ_ENT_DWORDS];
+ /*
+ * A combined IRQ might call into this function with the queue disabled.
+ * E.g. kdump, where stale HW PROD vs SW CONS would drive a bogus drain
+ * and a CONS write to a disabled queue.
+ */
+ if (!(readl_relaxed(smmu->base + ARM_SMMU_CR0) & CR0_PRIQEN))
+ return IRQ_NONE;
+
do {
while (!queue_remove_raw(q, evt))
arm_smmu_handle_ppr(smmu, evt);
@@ -5055,7 +5071,10 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
int ret, irq;
- u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
+ u32 irqen_flags = IRQ_CTRL_GERROR_IRQEN;
+
+ if (!is_kdump_kernel())
+ irqen_flags |= IRQ_CTRL_EVTQ_IRQEN;
/* Disable IRQs first */
ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -5081,7 +5100,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
} else
arm_smmu_setup_unique_irqs(smmu);
- if (smmu->features & ARM_SMMU_FEAT_PRI)
+ if (!is_kdump_kernel() && (smmu->features & ARM_SMMU_FEAT_PRI))
irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
/* Enable interrupt generation on the SMMU */
@@ -5191,21 +5210,35 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
- /* Event queue */
- writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
- writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
- writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
-
- enables |= CR0_EVTQEN;
- ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
- ARM_SMMU_CR0ACK);
- if (ret) {
- dev_err(smmu->dev, "failed to enable event queue\n");
- return ret;
+ /*
+ * Event queue
+ *
+ * Do not enable in a kdump case, as the crashed kernel's CDs and page
+ * tables might be corrupted, triggering event spamming.
+ */
+ if (!is_kdump_kernel()) {
+ writeq_relaxed(smmu->evtq.q.q_base,
+ smmu->base + ARM_SMMU_EVTQ_BASE);
+ writel_relaxed(smmu->evtq.q.llq.prod,
+ smmu->page1 + ARM_SMMU_EVTQ_PROD);
+ writel_relaxed(smmu->evtq.q.llq.cons,
+ smmu->page1 + ARM_SMMU_EVTQ_CONS);
+
+ enables |= CR0_EVTQEN;
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to enable event queue\n");
+ return ret;
+ }
}
- /* PRI queue */
- if (smmu->features & ARM_SMMU_FEAT_PRI) {
+ /*
+ * PRI queue
+ *
+ * Do not enable in a kdump case, as we cannot serve page requests.
+ */
+ if (!is_kdump_kernel() && (smmu->features & ARM_SMMU_FEAT_PRI)) {
writeq_relaxed(smmu->priq.q.q_base,
smmu->base + ARM_SMMU_PRIQ_BASE);
writel_relaxed(smmu->priq.q.llq.prod,
@@ -5238,9 +5271,6 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
return ret;
}
- if (is_kdump_kernel())
- enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
-
/* Enable the SMMU interface */
enables |= CR0_SMMUEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
--
2.43.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH rc v4 4/5] iommu/arm-smmu-v3: Retain CR0_SMMUEN during kdump device reset
2026-04-29 7:20 [PATCH rc v4 0/5] iommu/arm-smmu-v3: Fix device crash on kdump kernel Nicolin Chen
` (2 preceding siblings ...)
2026-04-29 7:20 ` [PATCH rc v4 3/5] iommu/arm-smmu-v3: Skip EVTQ/PRIQ setup in kdump kernel Nicolin Chen
@ 2026-04-29 7:20 ` Nicolin Chen
2026-04-29 7:20 ` [PATCH rc v4 5/5] iommu/arm-smmu-v3: Detect ARM_SMMU_OPT_KDUMP_ADOPT in probe() Nicolin Chen
4 siblings, 0 replies; 9+ messages in thread
From: Nicolin Chen @ 2026-04-29 7:20 UTC (permalink / raw)
To: will, robin.murphy, jgg, kevin.tian
Cc: joro, praan, kees, baolu.lu, miko.lenczewski, smostafa,
linux-arm-kernel, iommu, linux-kernel, stable, jamien
When ARM_SMMU_OPT_KDUMP_ADOPT is detected, do not disable SMMUEN and skip
the CR1/CR2/STRTAB_BASE update sequence in arm_smmu_device_reset(). Those
register writes are all CONSTRAINED UNPREDICTABLE while CR0_SMMUEN==1, so
leaving them intact lets in-flight DMAs continue to be translated by the
adopted stream table.
Initialize 'enables' to 0 so it can carry CR0_SMMUEN in the kdump case. Then,
preserve that when enabling the command queue.
Clear latched gerror bits if necessary.
Fixes: b63b3439b856 ("iommu/arm-smmu-v3: Abort all transactions if SMMU is enabled in kdump kernel")
Cc: stable@vger.kernel.org # v6.12+
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 47 +++++++++++++++++++--
1 file changed, 44 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 17d5e1395e245..f9332cf0b28a6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -5150,11 +5150,28 @@ static void arm_smmu_write_strtab(struct arm_smmu_device *smmu)
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
int ret;
- u32 reg, enables;
+ u32 reg, enables = 0;
struct arm_smmu_cmdq_ent cmd;
- /* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
+
+ /*
+ * In a kdump case (set when CR0_SMMUEN=1 and !GERROR_SFM_ERR), retain
+ * CR0_SMMUEN to avoid aborting in-flight DMA, and CR0_ATSCHK to carry
+ * on the ATS-check policy.
+ *
+ * According to spec, updating STRTAB_BASE/CR1/CR2 when CR0_SMMUEN=1 is
+ * CONSTRAINED UNPREDICTABLE. So, skip those register updates and rely
+ * on the adopted stream table from the crashed kernel.
+ */
+ if (smmu->options & ARM_SMMU_OPT_KDUMP_ADOPT) {
+ dev_info(smmu->dev,
+ "kdump: retaining SMMUEN for in-flight DMA\n");
+ enables = reg & (CR0_SMMUEN | CR0_ATSCHK);
+ goto reset_queues;
+ }
+
+ /* Clear CR0 and sync (disables SMMU and queue processing) */
if (reg & CR0_SMMUEN) {
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
@@ -5184,12 +5201,36 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Stream table */
arm_smmu_write_strtab(smmu);
+reset_queues:
+ if (smmu->options & ARM_SMMU_OPT_KDUMP_ADOPT) {
+ /* Disable queues since arm_smmu_device_disable() was skipped */
+ ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
+ ARM_SMMU_CR0ACK);
+ if (ret) {
+ dev_err(smmu->dev, "failed to disable queues\n");
+ return ret;
+ }
+ }
+
+ /*
+ * GERROR bits are latched. Read after queue disabling so that unhandled
+ * errors would be visible. Ack everything prior to re-enabling the CMDQ
+ * as a stale CMDQ_ERR would halt the CMDQ and new command will timeout.
+ */
+ if (is_kdump_kernel()) {
+ u32 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
+ u32 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
+
+ if ((gerror ^ gerrorn) & GERROR_ERR_MASK)
+ writel(gerror, smmu->base + ARM_SMMU_GERRORN);
+ }
+
/* Command queue */
writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
- enables = CR0_CMDQEN;
+ enables |= CR0_CMDQEN;
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
if (ret) {
--
2.43.0
^ permalink raw reply related [flat|nested] 9+ messages in thread

* [PATCH rc v4 5/5] iommu/arm-smmu-v3: Detect ARM_SMMU_OPT_KDUMP_ADOPT in probe()
2026-04-29 7:20 [PATCH rc v4 0/5] iommu/arm-smmu-v3: Fix device crash on kdump kernel Nicolin Chen
` (3 preceding siblings ...)
2026-04-29 7:20 ` [PATCH rc v4 4/5] iommu/arm-smmu-v3: Retain CR0_SMMUEN during kdump device reset Nicolin Chen
@ 2026-04-29 7:20 ` Nicolin Chen
4 siblings, 0 replies; 9+ messages in thread
From: Nicolin Chen @ 2026-04-29 7:20 UTC (permalink / raw)
To: will, robin.murphy, jgg, kevin.tian
Cc: joro, praan, kees, baolu.lu, miko.lenczewski, smostafa,
linux-arm-kernel, iommu, linux-kernel, stable, jamien
arm_smmu_device_hw_probe() runs before arm_smmu_init_structures(), so it's
natural to decide whether the kdump kernel must adopt the crashed kernel's
stream table.
Given that memremap is used to adopt the old stream table, set this option
only on a coherent SMMU.
And make sure SMMU isn't in Service Failure Mode.
Fixes: b63b3439b856 ("iommu/arm-smmu-v3: Abort all transactions if SMMU is enabled in kdump kernel")
Cc: stable@vger.kernel.org # v6.12+
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 34 +++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f9332cf0b28a6..18e0d97cec401 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -5402,6 +5402,36 @@ static void arm_smmu_get_httu(struct arm_smmu_device *smmu, u32 reg)
hw_features, fw_features);
}
+static void arm_smmu_device_hw_probe_kdump(struct arm_smmu_device *smmu)
+{
+ u32 gerror, gerrorn, active;
+
+ /*
+ * If SMMU is already active in kdump case, there could be in-flight DMA
+ * from devices initiated by the crashed kernel.
+ */
+ if (!(readl_relaxed(smmu->base + ARM_SMMU_CR0) & CR0_SMMUEN))
+ return;
+
+ /* For now, only support a coherent SMMU that works with MEMREMAP_WB */
+ if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY)) {
+ dev_warn(smmu->dev,
+ "kdump: non-coherent SMMU can't adopt stream table\n");
+ return;
+ }
+
+ gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
+ gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
+ active = gerror ^ gerrorn;
+ if (active & GERROR_SFM_ERR) {
+ dev_warn(smmu->dev,
+ "kdump: SMMU in Service Failure Mode, must reset\n");
+ return;
+ }
+
+ smmu->options |= ARM_SMMU_OPT_KDUMP_ADOPT;
+}
+
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
u32 reg;
@@ -5616,6 +5646,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
dev_info(smmu->dev, "oas %lu-bit (features 0x%08x)\n",
smmu->oas, smmu->features);
+
+ if (is_kdump_kernel())
+ arm_smmu_device_hw_probe_kdump(smmu);
+
return 0;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 9+ messages in thread