From: Pranjal Shrivastava <praan@google.com>
To: iommu@lists.linux.dev
Cc: Will Deacon <will@kernel.org>, Joerg Roedel <joro@8bytes.org>,
Robin Murphy <robin.murphy@arm.com>,
Jason Gunthorpe <jgg@ziepe.ca>,
Mostafa Saleh <smostafa@google.com>,
Nicolin Chen <nicolinc@nvidia.com>,
Daniel Mentz <danielmentz@google.com>,
Ashish Mhetre <amhetre@nvidia.com>,
Sairaj Kodilkar <sarunkod@amd.com>,
Pranjal Shrivastava <praan@google.com>
Subject: [PATCH v5 10/10] iommu/arm-smmu-v3: Invoke pm_runtime before hw access
Date: Mon, 26 Jan 2026 15:11:57 +0000 [thread overview]
Message-ID: <20260126151157.3418145-11-praan@google.com> (raw)
In-Reply-To: <20260126151157.3418145-1-praan@google.com>
Invoke the pm_runtime helpers at all places before accessing the hw.
The idea is to invoke the runtime PM helpers at the common points that
are used by exposed ops or interrupt handlers. Elide all TLB and CFG
invalidations if the SMMU is suspended, but never elide ATC
invalidations.
Signed-off-by: Pranjal Shrivastava <praan@google.com>
---
.../arm/arm-smmu-v3/arm-smmu-v3-iommufd.c | 10 +-
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 92 +++++++++++++++++--
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 3 +
3 files changed, 97 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 823461a26659..f1f0920ac12c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -15,6 +15,11 @@ void *arm_smmu_hw_info(struct device *dev, u32 *length,
struct iommu_hw_info_arm_smmuv3 *info;
u32 __iomem *base_idr;
unsigned int i;
+ int ret;
+
+ ret = arm_smmu_rpm_get(master->smmu);
+ if (ret < 0)
+ return ERR_PTR(-EIO);
if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
*type != IOMMU_HW_INFO_TYPE_ARM_SMMUV3) {
@@ -24,8 +29,10 @@ void *arm_smmu_hw_info(struct device *dev, u32 *length,
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ arm_smmu_rpm_put(master->smmu);
return ERR_PTR(-ENOMEM);
+ }
base_idr = master->smmu->base + ARM_SMMU_IDR0;
for (i = 0; i <= 5; i++)
@@ -36,6 +43,7 @@ void *arm_smmu_hw_info(struct device *dev, u32 *length,
*length = sizeof(*info);
*type = IOMMU_HW_INFO_TYPE_ARM_SMMUV3;
+ arm_smmu_rpm_put(master->smmu);
return info;
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 9931dde41239..9e2d16c128b0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -110,7 +110,7 @@ static const char * const event_class_str[] = {
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
/* Runtime PM helpers */
-__maybe_unused static int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
+int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
int ret;
@@ -125,7 +125,7 @@ __maybe_unused static int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
return 0;
}
-__maybe_unused static void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
+void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
int ret;
@@ -1106,7 +1106,9 @@ static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused
{
struct arm_smmu_cmdq_ent cmd = {0};
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ struct arm_smmu_device *smmu = master->smmu;
int sid = master->streams[0].id;
+ int ret;
if (WARN_ON(!master->stall_enabled))
return;
@@ -1126,6 +1128,25 @@ static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused
break;
}
+ /*
+ * The SMMU is guaranteed to be active via device_link if any master is
+ * active. Furthermore, on suspend we set GBPA to abort, flushing any
+ * pending stalled transactions.
+ *
+ * Receiving a page fault while suspended implies a bug in the power
+ * dependency chain or a stale event. Since the SMMU is powered down
+ * and the command queue is inaccessible, we cannot issue the
+ * RESUME command and must drop it.
+ */
+ if (!atomic_inc_not_zero(&smmu->nr_cmdq_users)) {
+ dev_err(smmu->dev, "Ignoring page fault while suspended\n");
+ return;
+ }
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ goto out;
+
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
/*
* Don't send a SYNC, it doesn't do anything for RESUME or PRI_RESP.
@@ -1133,6 +1154,9 @@ static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused
* terminated... at some point in the future. PRI_RESP is fire and
* forget.
*/
+ arm_smmu_rpm_put(smmu);
+out:
+ atomic_dec_return_release(&smmu->nr_cmdq_users);
}
/* Context descriptor manipulation functions */
@@ -2111,6 +2135,7 @@ static void arm_smmu_dump_event(struct arm_smmu_device *smmu, u64 *raw,
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
+ int ret;
u64 evt[EVTQ_ENT_DWORDS];
struct arm_smmu_event event = {0};
struct arm_smmu_device *smmu = dev;
@@ -2119,6 +2144,10 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return IRQ_NONE;
+
do {
while (!queue_remove_raw(q, evt)) {
arm_smmu_decode_event(smmu, evt, &event);
@@ -2139,6 +2168,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
/* Sync our overflow flag, as we believe we're up to speed */
queue_sync_cons_ovf(q);
+ arm_smmu_rpm_put(smmu);
return IRQ_HANDLED;
}
@@ -2186,6 +2216,11 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
struct arm_smmu_queue *q = &smmu->priq.q;
struct arm_smmu_ll_queue *llq = &q->llq;
u64 evt[PRIQ_ENT_DWORDS];
+ int ret;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return IRQ_NONE;
do {
while (!queue_remove_raw(q, evt))
@@ -2197,6 +2232,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
/* Sync our overflow flag, as we believe we're up to speed */
queue_sync_cons_ovf(q);
+ arm_smmu_rpm_put(smmu);
return IRQ_HANDLED;
}
@@ -2251,8 +2287,21 @@ static irqreturn_t arm_smmu_handle_gerror(struct arm_smmu_device *smmu)
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
struct arm_smmu_device *smmu = dev;
+ irqreturn_t ret;
+
+ /* If we are suspended, this is a spurious interrupt */
+ if (!atomic_read(&smmu->nr_cmdq_users) ||
+ (pm_runtime_enabled(smmu->dev) &&
+ pm_runtime_get_if_active(smmu->dev) <= 0)) {
+ dev_err(smmu->dev,
+ "Ignoring gerror interrupt because the SMMU is suspended\n");
+ return IRQ_NONE;
+ }
- return arm_smmu_handle_gerror(smmu);
+ ret = arm_smmu_handle_gerror(smmu);
+ arm_smmu_rpm_put(smmu);
+
+ return ret;
}
static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
@@ -2342,26 +2391,33 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
ioasid_t ssid)
{
- int i;
+ int i, ret;
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_cmdq_batch cmds;
arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
+ /* ATC invalidations shouldn't be elided */
+ ret = arm_smmu_rpm_get(master->smmu);
+ if (ret < 0)
+ return ret;
+
arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
}
- return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
+ ret = arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
+ arm_smmu_rpm_put(master->smmu);
+ return ret;
}
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
unsigned long iova, size_t size)
{
struct arm_smmu_master_domain *master_domain;
- int i;
+ int i, ret;
unsigned long flags;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_ATC_INV,
@@ -2388,6 +2444,11 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
if (!atomic_read(&smmu_domain->nr_ats_masters))
return 0;
+ /* ATC invalidations shouldn't be elided */
+ ret = arm_smmu_rpm_get(smmu_domain->smmu);
+ if (ret < 0)
+ return ret;
+
arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2416,7 +2477,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
+ ret = arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
+ arm_smmu_rpm_put(smmu_domain->smmu);
+ return ret;
}
/* IO_PGTABLE API */
@@ -5117,10 +5180,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
static void arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+ int ret;
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ goto free_iopf;
+
arm_smmu_device_disable(smmu);
+ arm_smmu_rpm_put(smmu);
+
+free_iopf:
iopf_queue_free(smmu->evtq.iopf);
ida_destroy(&smmu->vmid_map);
}
@@ -5128,8 +5200,14 @@ static void arm_smmu_device_remove(struct platform_device *pdev)
static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return;
arm_smmu_device_disable(smmu);
+ arm_smmu_rpm_put(smmu);
}
static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 6a752fe06f54..41e8da97193a 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -1023,6 +1023,9 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
bool sync);
+int arm_smmu_rpm_get(struct arm_smmu_device *smmu);
+void arm_smmu_rpm_put(struct arm_smmu_device *smmu);
+
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
void arm_smmu_sva_notifier_synchronize(void);
--
2.52.0.457.g6b5491de43-goog
next prev parent reply other threads:[~2026-01-26 15:12 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-26 15:11 [PATCH v5 00/10] iommu/arm-smmu-v3: Implement Runtime/System Sleep ops Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 01/10] iommu/arm-smmu-v3: Refactor arm_smmu_setup_irqs Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 02/10] iommu/arm-smmu-v3: Add a helper to drain cmd queues Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 03/10] iommu/tegra241-cmdqv: Add a helper to drain VCMDQs Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 04/10] iommu/tegra241-cmdqv: Restore PROD and CONS after resume Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 05/10] iommu/arm-smmu-v3: Cache and restore MSI config Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 06/10] iommu/arm-smmu-v3: Add a usage counter for cmdq Pranjal Shrivastava
2026-03-08 21:23 ` Daniel Mentz
2026-03-09 19:46 ` Pranjal Shrivastava
2026-03-09 21:13 ` Pranjal Shrivastava
2026-03-09 22:56 ` Daniel Mentz
2026-03-10 16:46 ` Pranjal Shrivastava
2026-03-09 22:41 ` Daniel Mentz
2026-03-10 16:54 ` Pranjal Shrivastava
2026-03-11 4:43 ` Daniel Mentz
2026-03-11 13:53 ` Pranjal Shrivastava
2026-03-11 22:56 ` Daniel Mentz
2026-03-16 15:36 ` Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 07/10] iommu/arm-smmu-v3: Implement pm_runtime & system sleep ops Pranjal Shrivastava
2026-03-10 1:45 ` Daniel Mentz
2026-03-10 16:58 ` Pranjal Shrivastava
2026-03-10 21:16 ` Daniel Mentz
2026-03-10 21:40 ` Pranjal Shrivastava
2026-03-11 0:12 ` Daniel Mentz
2026-03-11 5:31 ` Pranjal Shrivastava
2026-03-11 17:26 ` Daniel Mentz
2026-03-16 15:05 ` Pranjal Shrivastava
2026-03-12 19:20 ` Daniel Mentz
2026-01-26 15:11 ` [PATCH v5 08/10] iommu/arm-smmu-v3: Handle gerror during suspend Pranjal Shrivastava
2026-01-26 15:11 ` [PATCH v5 09/10] iommu/arm-smmu-v3: Enable pm_runtime and setup devlinks Pranjal Shrivastava
2026-01-26 15:11 ` Pranjal Shrivastava [this message]
2026-02-06 15:48 ` [PATCH v5 00/10] iommu/arm-smmu-v3: Implement Runtime/System Sleep ops Ashish Mhetre
2026-02-11 13:33 ` Pranjal Shrivastava
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260126151157.3418145-11-praan@google.com \
--to=praan@google.com \
--cc=amhetre@nvidia.com \
--cc=danielmentz@google.com \
--cc=iommu@lists.linux.dev \
--cc=jgg@ziepe.ca \
--cc=joro@8bytes.org \
--cc=nicolinc@nvidia.com \
--cc=robin.murphy@arm.com \
--cc=sarunkod@amd.com \
--cc=smostafa@google.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.