From: Johan Hovold <johan+linaro@kernel.org>
To: Marc Zyngier <maz@kernel.org>, Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org, platform-driver-x86@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org,
linux-kernel@vger.kernel.org,
Johan Hovold <johan+linaro@kernel.org>,
Hsin-Yi Wang <hsinyi@chromium.org>,
Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Subject: [PATCH v4 19/19] irqdomain: Switch to per-domain locking
Date: Mon, 16 Jan 2023 14:50:44 +0100
Message-ID: <20230116135044.14998-20-johan+linaro@kernel.org>
In-Reply-To: <20230116135044.14998-1-johan+linaro@kernel.org>
The IRQ domain structures are currently protected by the global
irq_domain_mutex. Switch to using more fine-grained per-domain locking,
which may speed up parallel probing somewhat.
Note that the domain lock of the root domain (innermost domain) must be
used for hierarchical domains. For non-hierarchical domains (as for root
domains), the new root pointer is set to the domain itself so that
domain->root->mutex can be used in shared code paths.
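
A minimal sketch of the resulting locking pattern in a shared code path
(the helper name is hypothetical; the mutex and root members are the
ones added by this patch):

#include <linux/irqdomain.h>
#include <linux/mutex.h>

static void example_shared_path(struct irq_domain *domain)
{
	/* For non-hierarchical domains, domain->root is the domain itself. */
	mutex_lock(&domain->root->mutex);

	/* ... look up or update the domain's mappings ... */

	mutex_unlock(&domain->root->mutex);
}
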
Also note that hierarchical domains should be constructed using
irq_domain_create_hierarchy() (or irq_domain_add_hierarchy()) to avoid
poking at irqdomain internals. As a safeguard, the lockdep assertion in
irq_domain_set_mapping() will catch any offenders that fail to set the
root domain pointer.
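
As a hedged illustration, a driver creating a stacked domain would go
through irq_domain_create_hierarchy() so that the root pointer is
inherited from the parent (the function and ops names below are
hypothetical):

#include <linux/irqdomain.h>

static const struct irq_domain_ops example_domain_ops;	/* hypothetical ops */

static struct irq_domain *example_create_domain(struct fwnode_handle *fwnode,
						struct irq_domain *parent)
{
	struct irq_domain *d;

	/* irq_domain_create_hierarchy() sets d->root = parent->root */
	d = irq_domain_create_hierarchy(parent, 0, 64, fwnode,
					&example_domain_ops, NULL);
	if (!d)
		return NULL;

	/* Shared code paths now serialize on d->root->mutex. */
	return d;
}
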
Tested-by: Hsin-Yi Wang <hsinyi@chromium.org>
Tested-by: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Signed-off-by: Johan Hovold <johan+linaro@kernel.org>
---
include/linux/irqdomain.h | 4 +++
kernel/irq/irqdomain.c | 61 +++++++++++++++++++++++++--------------
2 files changed, 44 insertions(+), 21 deletions(-)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 7fd3939328c2..b1b06d75d31a 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -125,6 +125,8 @@ struct irq_domain_chip_generic;
* core code.
* @flags: Per irq_domain flags
* @mapcount: The number of mapped interrupts
+ * @mutex: Domain lock, hierarchical domains use root domain's lock
+ * @root: Pointer to root domain, or containing structure if non-hierarchical
*
* Optional elements:
* @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
@@ -152,6 +154,8 @@ struct irq_domain {
void *host_data;
unsigned int flags;
unsigned int mapcount;
+ struct mutex mutex;
+ struct irq_domain *root;
/* Optional data */
struct fwnode_handle *fwnode;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 6f2b8a1248e1..77c31b89740d 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -226,6 +226,17 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
domain->revmap_size = size;
+ /*
+ * Hierarchical domains use the domain lock of the root domain
+ * (innermost domain).
+ *
+ * For non-hierarchical domains (as for root domains), the root
+ * pointer is set to the domain itself so that domain->root->mutex
+ * can be used in shared code paths.
+ */
+ mutex_init(&domain->mutex);
+ domain->root = domain;
+
irq_domain_check_hierarchy(domain);
mutex_lock(&irq_domain_mutex);
@@ -503,7 +514,7 @@ static bool irq_domain_is_nomap(struct irq_domain *domain)
static void irq_domain_clear_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- lockdep_assert_held(&irq_domain_mutex);
+ lockdep_assert_held(&domain->root->mutex);
if (irq_domain_is_nomap(domain))
return;
@@ -518,7 +529,11 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
struct irq_data *irq_data)
{
- lockdep_assert_held(&irq_domain_mutex);
+ /*
+ * This also makes sure that all domains point to the same root when
+ * called from irq_domain_insert_irq() for each domain in a hierarchy.
+ */
+ lockdep_assert_held(&domain->root->mutex);
if (irq_domain_is_nomap(domain))
return;
@@ -540,7 +555,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
hwirq = irq_data->hwirq;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->mutex);
irq_set_status_flags(irq, IRQ_NOREQUEST);
@@ -562,7 +577,7 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
/* Clear reverse map for this hwirq */
irq_domain_clear_mapping(domain, hwirq);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->mutex);
}
static int __irq_domain_associate(struct irq_domain *domain, unsigned int virq,
@@ -612,9 +627,9 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
{
int ret;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->mutex);
ret = __irq_domain_associate(domain, virq, hwirq);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->mutex);
return ret;
}
@@ -731,7 +746,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
return 0;
}
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->mutex);
/* Check if mapping already exists */
virq = irq_find_mapping(domain, hwirq);
@@ -742,7 +757,7 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
virq = __irq_create_mapping_affinity(domain, hwirq, affinity);
out:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->mutex);
return virq;
}
@@ -811,7 +826,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
type &= IRQ_TYPE_SENSE_MASK;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
/*
* If we've already configured this interrupt,
@@ -864,11 +879,11 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
/* Store trigger type */
irqd_set_trigger_type(irq_data, type);
out:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return virq;
err:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return 0;
}
@@ -1132,6 +1147,7 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
else
domain = irq_domain_create_tree(fwnode, ops, host_data);
if (domain) {
+ domain->root = parent->root;
domain->parent = parent;
domain->flags |= flags;
}
@@ -1528,10 +1544,10 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
return -EINVAL;
}
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
ret = ___irq_domain_alloc_irqs(domain, irq_base, nr_irqs, node, arg,
realloc, affinity);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return ret;
}
@@ -1542,7 +1558,7 @@ static void irq_domain_fix_revmap(struct irq_data *d)
{
void __rcu **slot;
- lockdep_assert_held(&irq_domain_mutex);
+ lockdep_assert_held(&d->domain->root->mutex);
if (irq_domain_is_nomap(d->domain))
return;
@@ -1608,7 +1624,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
if (!parent_irq_data)
return -ENOMEM;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
/* Copy the original irq_data. */
*parent_irq_data = *irq_data;
@@ -1636,7 +1652,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
irq_domain_fix_revmap(parent_irq_data);
irq_domain_set_mapping(domain, irq_data->hwirq, irq_data);
error:
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
return rv;
}
@@ -1691,7 +1707,7 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
if (WARN_ON(!parent_irq_data))
return -EINVAL;
- mutex_lock(&irq_domain_mutex);
+ mutex_lock(&domain->root->mutex);
irq_data->parent_data = NULL;
@@ -1703,7 +1719,7 @@ int irq_domain_pop_irq(struct irq_domain *domain, int virq)
irq_domain_fix_revmap(irq_data);
- mutex_unlock(&irq_domain_mutex);
+ mutex_unlock(&domain->root->mutex);
kfree(parent_irq_data);
@@ -1719,17 +1735,20 @@ EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *data = irq_get_irq_data(virq);
+ struct irq_domain *domain;
int i;
if (WARN(!data || !data->domain || !data->domain->ops->free,
"NULL pointer, cannot free irq\n"))
return;
- mutex_lock(&irq_domain_mutex);
+ domain = data->domain;
+
+ mutex_lock(&domain->root->mutex);
for (i = 0; i < nr_irqs; i++)
irq_domain_remove_irq(virq + i);
- irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
- mutex_unlock(&irq_domain_mutex);
+ irq_domain_free_irqs_hierarchy(domain, virq, nr_irqs);
+ mutex_unlock(&domain->root->mutex);
irq_domain_free_irq_data(virq, nr_irqs);
irq_free_descs(virq, nr_irqs);
--
2.38.2