From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>, Jiri Slaby <jirislaby@kernel.org>
Subject: [patch 02/46] genirq/irqdesc: Switch to lock guards
Date: Thu, 13 Mar 2025 16:59:44 +0100 (CET) [thread overview]
Message-ID: <20250313155914.010145118@linutronix.de> (raw)
In-Reply-To: <20250313154615.860723120@linutronix.de>
Replace all lock/unlock pairs with lock guards and simplify the code flow.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/irq/irqdesc.c | 136 +++++++++++++++------------------------------------
1 file changed, 42 insertions(+), 94 deletions(-)
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -266,104 +266,68 @@ static ssize_t per_cpu_count_show(struct
}
IRQ_ATTR_RO(per_cpu_count);
-static ssize_t chip_name_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
- ssize_t ret = 0;
-
- raw_spin_lock_irq(&desc->lock);
- if (desc->irq_data.chip && desc->irq_data.chip->name) {
- ret = scnprintf(buf, PAGE_SIZE, "%s\n",
- desc->irq_data.chip->name);
- }
- raw_spin_unlock_irq(&desc->lock);
- return ret;
+ guard(raw_spinlock_irq)(&desc->lock);
+ if (desc->irq_data.chip && desc->irq_data.chip->name)
+ return scnprintf(buf, PAGE_SIZE, "%s\n", desc->irq_data.chip->name);
+ return 0;
}
IRQ_ATTR_RO(chip_name);
-static ssize_t hwirq_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
- ssize_t ret = 0;
-
- raw_spin_lock_irq(&desc->lock);
- if (desc->irq_data.domain)
- ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq);
- raw_spin_unlock_irq(&desc->lock);
- return ret;
+ guard(raw_spinlock_irq)(&desc->lock);
+ return desc->irq_data.domain ? sprintf(buf, "%lu\n", desc->irq_data.hwirq) : 0;
}
IRQ_ATTR_RO(hwirq);
-static ssize_t type_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
- ssize_t ret = 0;
-
- raw_spin_lock_irq(&desc->lock);
- ret = sprintf(buf, "%s\n",
- irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
- raw_spin_unlock_irq(&desc->lock);
-
- return ret;
+ guard(raw_spinlock_irq)(&desc->lock);
+ return sprintf(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
}
IRQ_ATTR_RO(type);
-static ssize_t wakeup_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
- ssize_t ret = 0;
-
- raw_spin_lock_irq(&desc->lock);
- ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data)));
- raw_spin_unlock_irq(&desc->lock);
-
- return ret;
+ guard(raw_spinlock_irq)(&desc->lock);
+ return sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data)));
}
IRQ_ATTR_RO(wakeup);
-static ssize_t name_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
- ssize_t ret = 0;
- raw_spin_lock_irq(&desc->lock);
- if (desc->name)
- ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
- raw_spin_unlock_irq(&desc->lock);
-
- return ret;
+ guard(raw_spinlock_irq)(&desc->lock);
+ return desc->name ? scnprintf(buf, PAGE_SIZE, "%s\n", desc->name) : 0;
}
IRQ_ATTR_RO(name);
-static ssize_t actions_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
struct irqaction *action;
ssize_t ret = 0;
char *p = "";
- raw_spin_lock_irq(&desc->lock);
- for_each_action_of_desc(desc, action) {
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
- p, action->name);
- p = ",";
+ scoped_guard (raw_spinlock_irq, &desc->lock) {
+ for_each_action_of_desc(desc, action) {
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", p, action->name);
+ p = ",";
+ }
}
- raw_spin_unlock_irq(&desc->lock);
-
- if (ret)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
- return ret;
+ return ret ? ret + scnprintf(buf + ret, PAGE_SIZE - ret, "\n") : 0;
}
IRQ_ATTR_RO(actions);
@@ -418,19 +382,14 @@ static int __init irq_sysfs_init(void)
int irq;
/* Prevent concurrent irq alloc/free */
- irq_lock_sparse();
-
+ guard(mutex)(&sparse_irq_lock);
irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
- if (!irq_kobj_base) {
- irq_unlock_sparse();
+ if (!irq_kobj_base)
return -ENOMEM;
- }
/* Add the already allocated interrupts */
for_each_irq_desc(irq, desc)
irq_sysfs_add(irq, desc);
- irq_unlock_sparse();
-
return 0;
}
postcore_initcall(irq_sysfs_init);
@@ -573,12 +532,12 @@ static int alloc_descs(unsigned int star
return -ENOMEM;
}
-static int irq_expand_nr_irqs(unsigned int nr)
+static bool irq_expand_nr_irqs(unsigned int nr)
{
if (nr > MAX_SPARSE_IRQS)
- return -ENOMEM;
+ return false;
nr_irqs = nr;
- return 0;
+ return true;
}
int __init early_irq_init(void)
@@ -656,11 +615,9 @@ EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- unsigned long flags;
- raw_spin_lock_irqsave(&desc->lock, flags);
- desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ scoped_guard (raw_spinlock_irqsave, &desc->lock)
+ desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
delete_irq_desc(irq);
}
@@ -681,14 +638,13 @@ static inline int alloc_descs(unsigned i
-static int irq_expand_nr_irqs(unsigned int nr)
+static bool irq_expand_nr_irqs(unsigned int nr)
{
- return -ENOMEM;
+ return false;
}
void irq_mark_irq(unsigned int irq)
{
- mutex_lock(&sparse_irq_lock);
+ guard(mutex)(&sparse_irq_lock);
irq_insert_desc(irq, irq_desc + irq);
- mutex_unlock(&sparse_irq_lock);
}
#ifdef CONFIG_GENERIC_IRQ_LEGACY
@@ -827,11 +783,9 @@ void irq_free_descs(unsigned int from, u
if (from >= nr_irqs || (from + cnt) > nr_irqs)
return;
- mutex_lock(&sparse_irq_lock);
+ guard(mutex)(&sparse_irq_lock);
for (i = 0; i < cnt; i++)
free_desc(from + i);
-
- mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
@@ -848,11 +802,10 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
*
* Returns the first irq number or error code
*/
-int __ref
-__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct irq_affinity_desc *affinity)
+int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+ struct module *owner, const struct irq_affinity_desc *affinity)
{
- int start, ret;
+ int start;
if (!cnt)
return -EINVAL;
@@ -870,22 +823,17 @@ int __ref
from = arch_dynirq_lower_bound(from);
}
- mutex_lock(&sparse_irq_lock);
+ guard(mutex)(&sparse_irq_lock);
start = irq_find_free_area(from, cnt);
- ret = -EEXIST;
if (irq >=0 && start != irq)
- goto unlock;
+ return -EEXIST;
if (start + cnt > nr_irqs) {
- ret = irq_expand_nr_irqs(start + cnt);
- if (ret)
- goto unlock;
+ if (!irq_expand_nr_irqs(start + cnt))
+ return -ENOMEM;
}
- ret = alloc_descs(start, cnt, node, affinity, owner);
-unlock:
- mutex_unlock(&sparse_irq_lock);
- return ret;
+ return alloc_descs(start, cnt, node, affinity, owner);
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
next prev parent reply other threads:[~2025-03-13 15:59 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-03-13 15:59 [patch 00/46] genirq: Cleanups and conversion to lock guards Thomas Gleixner
2025-03-13 15:59 ` [patch 01/46] genirq: Provide conditional " Thomas Gleixner
2025-03-13 15:59 ` Thomas Gleixner [this message]
2025-03-14 10:57 ` [patch 02/46] genirq/irqdesc: Switch to " Jiri Slaby
2025-03-13 15:59 ` [patch 03/46] genirq/autoprobe: " Thomas Gleixner
2025-03-13 15:59 ` [patch 04/46] genirq/pm: " Thomas Gleixner
2025-03-13 15:59 ` [patch 05/46] genirq/resend: " Thomas Gleixner
2025-03-17 8:22 ` Shrikanth Hegde
2025-03-13 15:59 ` [patch 06/46] genirq/proc: " Thomas Gleixner
2025-03-13 15:59 ` [patch 07/46] genirq/spurious: Cleanup code Thomas Gleixner
2025-03-13 15:59 ` [patch 08/46] genirq/spurious: Switch to lock guards Thomas Gleixner
2025-03-13 15:59 ` [patch 09/46] genirq/cpuhotplug: Convert " Thomas Gleixner
2025-03-13 15:59 ` [patch 10/46] genirq/debugfs: " Thomas Gleixner
2025-03-13 16:00 ` [patch 11/46] genirq/chip: Prepare for code reduction Thomas Gleixner
2025-03-13 16:00 ` [patch 12/46] genirq/chip: Rework handle_nested_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 13/46] genirq/chip: Rework handle_simple_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 14/46] genirq/chip: Rework handle_untracked_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 15/46] genirq/chip: Rework handle_level_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 16/46] genirq/chip: Rework handle_eoi_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 17/46] genirq/chip: Rework handle_edge_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 18/46] genirq/chip: Rework handle_edge_eoi_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 19/46] genirq/chip: Rework handle_fasteoi_ack_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 20/46] genirq/chip: Rework handle_fasteoi_mask_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 21/46] genirq/chip: Use lock guards where applicable Thomas Gleixner
2025-03-13 16:00 ` [patch 22/46] genirq/chip: Rework irq_set_chip() Thomas Gleixner
2025-03-13 16:00 ` [patch 23/46] genirq/chip: Rework irq_set_irq_type() Thomas Gleixner
2025-03-13 16:00 ` [patch 24/46] genirq/chip: Rework irq_set_handler_data() Thomas Gleixner
2025-03-13 16:00 ` [patch 25/46] genirq/chip: Rework irq_set_msi_desc_off() Thomas Gleixner
2025-03-13 16:00 ` [patch 26/46] genirq/chip: Rework irq_set_chip_data() Thomas Gleixner
2025-03-13 16:00 ` [patch 27/46] genirq/chip: Rework irq_set_handler() variants Thomas Gleixner
2025-03-13 16:00 ` [patch 28/46] genirq/chip: Rework irq_modify_status() Thomas Gleixner
2025-03-13 16:00 ` [patch 29/46] genirq/manage: Cleanup kernel doc comments Thomas Gleixner
2025-03-13 16:00 ` [patch 30/46] genirq/manage: Convert to lock guards Thomas Gleixner
2025-03-13 16:00 ` [patch 31/46] genirq/manage: Rework irq_update_affinity_desc() Thomas Gleixner
2025-03-13 16:00 ` [patch 32/46] genirq/manage: Rework __irq_apply_affinity_hint() Thomas Gleixner
2025-03-13 16:00 ` [patch 33/46] genirq/manage: Rework irq_set_vcpu_affinity() Thomas Gleixner
2025-03-13 16:00 ` [patch 34/46] genirq/manage: Rework __disable_irq_nosync() Thomas Gleixner
2025-03-13 16:00 ` [patch 35/46] genirq/manage: Rework enable_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 36/46] genirq/manage: Rework irq_set_irq_wake() Thomas Gleixner
2025-03-13 16:00 ` [patch 37/46] genirq/manage: Rework can_request_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 38/46] genirq/manage: Rework irq_set_parent() Thomas Gleixner
2025-03-13 16:00 ` [patch 39/46] genirq/manage: Rework enable_percpu_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 40/46] genirq/manage: Rework irq_percpu_is_enabled() Thomas Gleixner
2025-03-13 16:00 ` [patch 41/46] genirq/manage: Rework disable_percpu_irq() Thomas Gleixner
2025-03-13 16:00 ` [patch 42/46] genirq/manage: Rework prepare_percpu_nmi() Thomas Gleixner
2025-03-13 16:00 ` [patch 43/46] genirq/manage: Rework teardown_percpu_nmi() Thomas Gleixner
2025-03-13 16:00 ` [patch 44/46] genirq/manage: Rework irq_get_irqchip_state() Thomas Gleixner
2025-03-13 16:01 ` [patch 45/46] genirq/manage: Rework irq_set_irqchip_state() Thomas Gleixner
2025-03-13 16:01 ` [patch 46/46] genirq: Remove irq_[get|put]_desc*() Thomas Gleixner
2025-03-14 9:04 ` [patch 00/46] genirq: Cleanups and conversion to lock guards Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250313155914.010145118@linutronix.de \
--to=tglx@linutronix.de \
--cc=jirislaby@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=peterz@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox