* [PATCH 1/5] iio: core: move to cleanup.h magic
2024-02-21 13:26 [PATCH 0/5] iio: move IIO to the cleanup.h magic Nuno Sa via B4 Relay
@ 2024-02-21 13:26 ` Nuno Sa via B4 Relay
2024-02-22 19:22 ` Jonathan Cameron
2024-02-21 13:26 ` [PATCH 2/5] iio: events: move to the " Nuno Sa via B4 Relay
` (3 subsequent siblings)
4 siblings, 1 reply; 8+ messages in thread
From: Nuno Sa via B4 Relay @ 2024-02-21 13:26 UTC (permalink / raw)
To: linux-iio; +Cc: Jonathan Cameron, Lars-Peter Clausen, Nuno Sa
From: Nuno Sa <nuno.sa@analog.com>
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Note that we keep the plain mutex calls in the
iio_device_release|acquire() APIs since in there the macros would likely
not help much (as we want to keep the lock acquired when we leave the
APIs).
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
---
drivers/iio/industrialio-core.c | 52 +++++++++++++++--------------------------
1 file changed, 19 insertions(+), 33 deletions(-)
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9b2877fe8689..7e6497828364 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -11,6 +11,7 @@
#include <linux/anon_inodes.h>
#include <linux/cdev.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -268,20 +269,16 @@ EXPORT_SYMBOL(iio_read_const_attr);
*/
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
- int ret;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
- ret = mutex_lock_interruptible(&iio_dev_opaque->mlock);
- if (ret)
- return ret;
- if ((ev_int && iio_event_enabled(ev_int)) ||
- iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&iio_dev_opaque->mlock);
- return -EBUSY;
+ scoped_cond_guard(mutex_intr, return -EINTR, &iio_dev_opaque->mlock) {
+ if ((ev_int && iio_event_enabled(ev_int)) ||
+ iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
+ iio_dev_opaque->clock_id = clock_id;
}
- iio_dev_opaque->clock_id = clock_id;
- mutex_unlock(&iio_dev_opaque->mlock);
return 0;
}
@@ -1806,31 +1803,22 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
struct iio_ioctl_handler *h;
- int ret = -ENODEV;
-
- mutex_lock(&iio_dev_opaque->info_exist_lock);
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
/*
* The NULL check here is required to prevent crashing when a device
* is being removed while userspace would still have open file handles
* to try to access this device.
*/
if (!indio_dev->info)
- goto out_unlock;
+ return -ENODEV;
list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
- ret = h->ioctl(indio_dev, filp, cmd, arg);
- if (ret != IIO_IOCTL_UNHANDLED)
- break;
+ if (h->ioctl(indio_dev, filp, cmd, arg) != IIO_IOCTL_UNHANDLED)
+ return 0;
}
- if (ret == IIO_IOCTL_UNHANDLED)
- ret = -ENODEV;
-
-out_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return -ENODEV;
}
static const struct file_operations iio_buffer_fileops = {
@@ -2058,18 +2046,16 @@ void iio_device_unregister(struct iio_dev *indio_dev)
cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);
- mutex_lock(&iio_dev_opaque->info_exist_lock);
+ scoped_guard(mutex, &iio_dev_opaque->info_exist_lock) {
+ iio_device_unregister_debugfs(indio_dev);
- iio_device_unregister_debugfs(indio_dev);
+ iio_disable_all_buffers(indio_dev);
- iio_disable_all_buffers(indio_dev);
+ indio_dev->info = NULL;
- indio_dev->info = NULL;
-
- iio_device_wakeup_eventset(indio_dev);
- iio_buffer_wakeup_poll(indio_dev);
-
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
+ iio_device_wakeup_eventset(indio_dev);
+ iio_buffer_wakeup_poll(indio_dev);
+ }
iio_buffers_free_sysfs_and_mask(indio_dev);
}
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* Re: [PATCH 1/5] iio: core: move to cleanup.h magic
2024-02-21 13:26 ` [PATCH 1/5] iio: core: move to " Nuno Sa via B4 Relay
@ 2024-02-22 19:22 ` Jonathan Cameron
2024-02-23 8:20 ` Nuno Sá
0 siblings, 1 reply; 8+ messages in thread
From: Jonathan Cameron @ 2024-02-22 19:22 UTC (permalink / raw)
To: Nuno Sa via B4 Relay; +Cc: nuno.sa, linux-iio, Lars-Peter Clausen
On Wed, 21 Feb 2024 14:26:52 +0100
Nuno Sa via B4 Relay <devnull+nuno.sa.analog.com@kernel.org> wrote:
> From: Nuno Sa <nuno.sa@analog.com>
>
> Use the new cleanup magic for handling mutexes in IIO. This allows us to
> greatly simplify some code paths.
>
> Note that we keep the plain mutex calls in the
> iio_device_release|acquire() APIs since in there the macros would likely
> not help much (as we want to keep the lock acquired when he leave the
> APIs).
>
> Signed-off-by: Nuno Sa <nuno.sa@analog.com>
> ---
> drivers/iio/industrialio-core.c | 52 +++++++++++++++--------------------------
> 1 file changed, 19 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
> index 9b2877fe8689..7e6497828364 100644
> --- a/drivers/iio/industrialio-core.c
> +++ b/drivers/iio/industrialio-core.c
> @@ -11,6 +11,7 @@
> @@ -1806,31 +1803,22 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
> struct iio_dev *indio_dev = ib->indio_dev;
> struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
> struct iio_ioctl_handler *h;
> - int ret = -ENODEV;
> -
> - mutex_lock(&iio_dev_opaque->info_exist_lock);
>
> + guard(mutex)(&iio_dev_opaque->info_exist_lock);
> /*
> * The NULL check here is required to prevent crashing when a device
> * is being removed while userspace would still have open file handles
> * to try to access this device.
> */
> if (!indio_dev->info)
> - goto out_unlock;
> + return -ENODEV;
>
> list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
> - ret = h->ioctl(indio_dev, filp, cmd, arg);
> - if (ret != IIO_IOCTL_UNHANDLED)
> - break;
> + if (h->ioctl(indio_dev, filp, cmd, arg) != IIO_IOCTL_UNHANDLED)
> + return 0;
This changes the return value when h->ioctl() returns something other than
IIO_IOCTL_UNHANDLED — which it can, I think...
> }
>
> - if (ret == IIO_IOCTL_UNHANDLED)
> - ret = -ENODEV;
> -
> -out_unlock:
> - mutex_unlock(&iio_dev_opaque->info_exist_lock);
> -
> - return ret;
> + return -ENODEV;
> }
^ permalink raw reply [flat|nested] 8+ messages in thread* Re: [PATCH 1/5] iio: core: move to cleanup.h magic
2024-02-22 19:22 ` Jonathan Cameron
@ 2024-02-23 8:20 ` Nuno Sá
0 siblings, 0 replies; 8+ messages in thread
From: Nuno Sá @ 2024-02-23 8:20 UTC (permalink / raw)
To: Jonathan Cameron, Nuno Sa via B4 Relay
Cc: nuno.sa, linux-iio, Lars-Peter Clausen
On Thu, 2024-02-22 at 19:22 +0000, Jonathan Cameron wrote:
> On Wed, 21 Feb 2024 14:26:52 +0100
> Nuno Sa via B4 Relay <devnull+nuno.sa.analog.com@kernel.org> wrote:
>
> > From: Nuno Sa <nuno.sa@analog.com>
> >
> > Use the new cleanup magic for handling mutexes in IIO. This allows us to
> > greatly simplify some code paths.
> >
> > Note that we keep the plain mutex calls in the
> > iio_device_release|acquire() APIs since in there the macros would likely
> > not help much (as we want to keep the lock acquired when he leave the
> > APIs).
> >
> > Signed-off-by: Nuno Sa <nuno.sa@analog.com>
> > ---
> > drivers/iio/industrialio-core.c | 52 +++++++++++++++-----------------------
> > ---
> > 1 file changed, 19 insertions(+), 33 deletions(-)
> >
> > diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-
> > core.c
> > index 9b2877fe8689..7e6497828364 100644
> > --- a/drivers/iio/industrialio-core.c
> > +++ b/drivers/iio/industrialio-core.c
> > @@ -11,6 +11,7 @@
>
>
>
> > @@ -1806,31 +1803,22 @@ static long iio_ioctl(struct file *filp, unsigned
> > int cmd, unsigned long arg)
> > struct iio_dev *indio_dev = ib->indio_dev;
> > struct iio_dev_opaque *iio_dev_opaque =
> > to_iio_dev_opaque(indio_dev);
> > struct iio_ioctl_handler *h;
> > - int ret = -ENODEV;
> > -
> > - mutex_lock(&iio_dev_opaque->info_exist_lock);
> >
> > + guard(mutex)(&iio_dev_opaque->info_exist_lock);
> > /*
> > * The NULL check here is required to prevent crashing when a
> > device
> > * is being removed while userspace would still have open file
> > handles
> > * to try to access this device.
> > */
> > if (!indio_dev->info)
> > - goto out_unlock;
> > + return -ENODEV;
> >
> > list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
> > - ret = h->ioctl(indio_dev, filp, cmd, arg);
> > - if (ret != IIO_IOCTL_UNHANDLED)
> > - break;
> > + if (h->ioctl(indio_dev, filp, cmd, arg) !=
> > IIO_IOCTL_UNHANDLED)
> > + return 0;
>
> Changes the return value if ret returns something other than
> IIO_IOCTL_UNHANDLED
> which it can I think...
Yeah, I think for the multi buffer support we can actually return the anon fd...
Pffff, one of those changes I made right before sending out the series thinking
I could remove one LOC without properly looking at the code :/
Will send a v2.
- Nuno Sá
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH 2/5] iio: events: move to the cleanup.h magic
2024-02-21 13:26 [PATCH 0/5] iio: move IIO to the cleanup.h magic Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 1/5] iio: core: move to " Nuno Sa via B4 Relay
@ 2024-02-21 13:26 ` Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 3/5] iio: trigger: " Nuno Sa via B4 Relay
` (2 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Nuno Sa via B4 Relay @ 2024-02-21 13:26 UTC (permalink / raw)
To: linux-iio; +Cc: Jonathan Cameron, Lars-Peter Clausen, Nuno Sa
From: Nuno Sa <nuno.sa@analog.com>
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
---
drivers/iio/industrialio-event.c | 42 +++++++++++++++++-----------------------
1 file changed, 18 insertions(+), 24 deletions(-)
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 910c1f14abd5..ef3cecbce915 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -7,6 +7,7 @@
*/
#include <linux/anon_inodes.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -146,11 +147,10 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
return -ENODEV;
}
- if (mutex_lock_interruptible(&ev_int->read_lock))
- return -ERESTARTSYS;
- ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
- mutex_unlock(&ev_int->read_lock);
-
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS,
+ &ev_int->read_lock)
+ ret = kfifo_to_user(&ev_int->det_events, buf, count,
+ &copied);
if (ret)
return ret;
@@ -198,28 +198,22 @@ static int iio_event_getfd(struct iio_dev *indio_dev)
if (ev_int == NULL)
return -ENODEV;
- fd = mutex_lock_interruptible(&iio_dev_opaque->mlock);
- if (fd)
- return fd;
+ scoped_cond_guard(mutex_intr, return -EINTR, &iio_dev_opaque->mlock) {
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
+ return -EBUSY;
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
- fd = -EBUSY;
- goto unlock;
+ iio_device_get(indio_dev);
+
+ fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
+ indio_dev, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ iio_device_put(indio_dev);
+ } else {
+ kfifo_reset_out(&ev_int->det_events);
+ }
}
- iio_device_get(indio_dev);
-
- fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
- indio_dev, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
- iio_device_put(indio_dev);
- } else {
- kfifo_reset_out(&ev_int->det_events);
- }
-
-unlock:
- mutex_unlock(&iio_dev_opaque->mlock);
return fd;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH 3/5] iio: trigger: move to the cleanup.h magic
2024-02-21 13:26 [PATCH 0/5] iio: move IIO to the cleanup.h magic Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 1/5] iio: core: move to " Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 2/5] iio: events: move to the " Nuno Sa via B4 Relay
@ 2024-02-21 13:26 ` Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 4/5] iio: buffer: iio: core: " Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 5/5] iio: inkern: " Nuno Sa via B4 Relay
4 siblings, 0 replies; 8+ messages in thread
From: Nuno Sa via B4 Relay @ 2024-02-21 13:26 UTC (permalink / raw)
To: linux-iio; +Cc: Jonathan Cameron, Lars-Peter Clausen, Nuno Sa
From: Nuno Sa <nuno.sa@analog.com>
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
---
drivers/iio/industrialio-trigger.c | 64 ++++++++++++++++----------------------
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 18f83158f637..e4f0802fdd1d 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -4,6 +4,7 @@
* Copyright (c) 2008 Jonathan Cameron
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
@@ -80,19 +81,18 @@ int iio_trigger_register(struct iio_trigger *trig_info)
goto error_unregister_id;
/* Add to list of available triggers held by the IIO core */
- mutex_lock(&iio_trigger_list_lock);
- if (__iio_trigger_find_by_name(trig_info->name)) {
- pr_err("Duplicate trigger name '%s'\n", trig_info->name);
- ret = -EEXIST;
- goto error_device_del;
+ scoped_guard(mutex, &iio_trigger_list_lock) {
+ if (__iio_trigger_find_by_name(trig_info->name)) {
+ pr_err("Duplicate trigger name '%s'\n", trig_info->name);
+ ret = -EEXIST;
+ goto error_device_del;
+ }
+ list_add_tail(&trig_info->list, &iio_trigger_list);
}
- list_add_tail(&trig_info->list, &iio_trigger_list);
- mutex_unlock(&iio_trigger_list_lock);
return 0;
error_device_del:
- mutex_unlock(&iio_trigger_list_lock);
device_del(&trig_info->dev);
error_unregister_id:
ida_free(&iio_trigger_ida, trig_info->id);
@@ -102,9 +102,8 @@ EXPORT_SYMBOL(iio_trigger_register);
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
- mutex_lock(&iio_trigger_list_lock);
- list_del(&trig_info->list);
- mutex_unlock(&iio_trigger_list_lock);
+ scoped_guard(mutex, &iio_trigger_list_lock)
+ list_del(&trig_info->list);
ida_free(&iio_trigger_ida, trig_info->id);
/* Possible issue in here */
@@ -120,12 +119,11 @@ int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *tri
return -EINVAL;
iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- mutex_lock(&iio_dev_opaque->mlock);
+ guard(mutex)(&iio_dev_opaque->mlock);
WARN_ON(iio_dev_opaque->trig_readonly);
indio_dev->trig = iio_trigger_get(trig);
iio_dev_opaque->trig_readonly = true;
- mutex_unlock(&iio_dev_opaque->mlock);
return 0;
}
@@ -145,18 +143,14 @@ static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
- struct iio_trigger *trig = NULL, *iter;
+ struct iio_trigger *iter;
- mutex_lock(&iio_trigger_list_lock);
+ guard(mutex)(&iio_trigger_list_lock);
list_for_each_entry(iter, &iio_trigger_list, list)
- if (sysfs_streq(iter->name, name)) {
- trig = iter;
- iio_trigger_get(trig);
- break;
- }
- mutex_unlock(&iio_trigger_list_lock);
+ if (sysfs_streq(iter->name, name))
+ return iio_trigger_get(iter);
- return trig;
+ return NULL;
}
static void iio_reenable_work_fn(struct work_struct *work)
@@ -259,11 +253,10 @@ static int iio_trigger_get_irq(struct iio_trigger *trig)
{
int ret;
- mutex_lock(&trig->pool_lock);
- ret = bitmap_find_free_region(trig->pool,
- CONFIG_IIO_CONSUMERS_PER_TRIGGER,
- ilog2(1));
- mutex_unlock(&trig->pool_lock);
+ scoped_guard(mutex, &trig->pool_lock)
+ ret = bitmap_find_free_region(trig->pool,
+ CONFIG_IIO_CONSUMERS_PER_TRIGGER,
+ ilog2(1));
if (ret >= 0)
ret += trig->subirq_base;
@@ -272,9 +265,8 @@ static int iio_trigger_get_irq(struct iio_trigger *trig)
static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
- mutex_lock(&trig->pool_lock);
+ guard(mutex)(&trig->pool_lock);
clear_bit(irq - trig->subirq_base, trig->pool);
- mutex_unlock(&trig->pool_lock);
}
/* Complexity in here. With certain triggers (datardy) an acknowledgement
@@ -451,16 +443,12 @@ static ssize_t current_trigger_store(struct device *dev,
struct iio_trigger *trig;
int ret;
- mutex_lock(&iio_dev_opaque->mlock);
- if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
- mutex_unlock(&iio_dev_opaque->mlock);
- return -EBUSY;
+ scoped_guard(mutex, &iio_dev_opaque->mlock) {
+ if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED)
+ return -EBUSY;
+ if (iio_dev_opaque->trig_readonly)
+ return -EPERM;
}
- if (iio_dev_opaque->trig_readonly) {
- mutex_unlock(&iio_dev_opaque->mlock);
- return -EPERM;
- }
- mutex_unlock(&iio_dev_opaque->mlock);
trig = iio_trigger_acquire_by_name(buf);
if (oldtrig == trig) {
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH 4/5] iio: buffer: iio: core: move to the cleanup.h magic
2024-02-21 13:26 [PATCH 0/5] iio: move IIO to the cleanup.h magic Nuno Sa via B4 Relay
` (2 preceding siblings ...)
2024-02-21 13:26 ` [PATCH 3/5] iio: trigger: " Nuno Sa via B4 Relay
@ 2024-02-21 13:26 ` Nuno Sa via B4 Relay
2024-02-21 13:26 ` [PATCH 5/5] iio: inkern: " Nuno Sa via B4 Relay
4 siblings, 0 replies; 8+ messages in thread
From: Nuno Sa via B4 Relay @ 2024-02-21 13:26 UTC (permalink / raw)
To: linux-iio; +Cc: Jonathan Cameron, Lars-Peter Clausen, Nuno Sa
From: Nuno Sa <nuno.sa@analog.com>
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
---
drivers/iio/industrialio-buffer.c | 105 ++++++++++++++------------------------
1 file changed, 38 insertions(+), 67 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index b581a7e80566..ec6bc881cf13 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -10,6 +10,7 @@
* - Alternative access techniques?
*/
#include <linux/anon_inodes.h>
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
@@ -533,28 +534,25 @@ static ssize_t iio_scan_el_store(struct device *dev,
ret = kstrtobool(buf, &state);
if (ret < 0)
return ret;
- mutex_lock(&iio_dev_opaque->mlock);
- if (iio_buffer_is_active(buffer)) {
- ret = -EBUSY;
- goto error_ret;
- }
+
+ guard(mutex)(&iio_dev_opaque->mlock);
+ if (iio_buffer_is_active(buffer))
+ return -EBUSY;
+
ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
- goto error_ret;
+ return ret;
if (!state && ret) {
ret = iio_scan_mask_clear(buffer, this_attr->address);
if (ret)
- goto error_ret;
+ return ret;
} else if (state && !ret) {
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
- goto error_ret;
+ return ret;
}
-error_ret:
- mutex_unlock(&iio_dev_opaque->mlock);
-
- return ret < 0 ? ret : len;
+ return len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
@@ -581,16 +579,13 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
if (ret < 0)
return ret;
- mutex_lock(&iio_dev_opaque->mlock);
- if (iio_buffer_is_active(buffer)) {
- ret = -EBUSY;
- goto error_ret;
- }
- buffer->scan_timestamp = state;
-error_ret:
- mutex_unlock(&iio_dev_opaque->mlock);
+ guard(mutex)(&iio_dev_opaque->mlock);
+ if (iio_buffer_is_active(buffer))
+ return -EBUSY;
- return ret ? ret : len;
+ buffer->scan_timestamp = state;
+
+ return len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
@@ -674,21 +669,16 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
if (val == buffer->length)
return len;
- mutex_lock(&iio_dev_opaque->mlock);
- if (iio_buffer_is_active(buffer)) {
- ret = -EBUSY;
- } else {
- buffer->access->set_length(buffer, val);
- ret = 0;
- }
- if (ret)
- goto out;
+ guard(mutex)(&iio_dev_opaque->mlock);
+ if (iio_buffer_is_active(buffer))
+ return -EBUSY;
+
+ buffer->access->set_length(buffer, val);
+
if (buffer->length && buffer->length < buffer->watermark)
buffer->watermark = buffer->length;
-out:
- mutex_unlock(&iio_dev_opaque->mlock);
- return ret ? ret : len;
+ return len;
}
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
@@ -1268,7 +1258,6 @@ int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *remove_buffer)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- int ret;
if (insert_buffer == remove_buffer)
return 0;
@@ -1277,8 +1266,8 @@ int iio_update_buffers(struct iio_dev *indio_dev,
insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
return -EINVAL;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- mutex_lock(&iio_dev_opaque->mlock);
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ guard(mutex)(&iio_dev_opaque->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer))
insert_buffer = NULL;
@@ -1286,23 +1275,13 @@ int iio_update_buffers(struct iio_dev *indio_dev,
if (remove_buffer && !iio_buffer_is_active(remove_buffer))
remove_buffer = NULL;
- if (!insert_buffer && !remove_buffer) {
- ret = 0;
- goto out_unlock;
- }
+ if (!insert_buffer && !remove_buffer)
+ return 0;
- if (!indio_dev->info) {
- ret = -ENODEV;
- goto out_unlock;
- }
+ if (!indio_dev->info)
+ return -ENODEV;
- ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
-
-out_unlock:
- mutex_unlock(&iio_dev_opaque->mlock);
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
@@ -1326,21 +1305,19 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- mutex_lock(&iio_dev_opaque->mlock);
+ guard(mutex)(&iio_dev_opaque->mlock);
/* Find out if it is in the list */
inlist = iio_buffer_is_active(buffer);
/* Already in desired state */
if (inlist == requested_state)
- goto done;
+ return len;
if (requested_state)
ret = __iio_update_buffers(indio_dev, buffer, NULL);
else
ret = __iio_update_buffers(indio_dev, NULL, buffer);
-done:
- mutex_unlock(&iio_dev_opaque->mlock);
return (ret < 0) ? ret : len;
}
@@ -1368,23 +1345,17 @@ static ssize_t watermark_store(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&iio_dev_opaque->mlock);
+ guard(mutex)(&iio_dev_opaque->mlock);
- if (val > buffer->length) {
- ret = -EINVAL;
- goto out;
- }
+ if (val > buffer->length)
+ return -EINVAL;
- if (iio_buffer_is_active(buffer)) {
- ret = -EBUSY;
- goto out;
- }
+ if (iio_buffer_is_active(buffer))
+ return -EBUSY;
buffer->watermark = val;
-out:
- mutex_unlock(&iio_dev_opaque->mlock);
- return ret ? ret : len;
+ return len;
}
static ssize_t data_available_show(struct device *dev,
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread* [PATCH 5/5] iio: inkern: move to the cleanup.h magic
2024-02-21 13:26 [PATCH 0/5] iio: move IIO to the cleanup.h magic Nuno Sa via B4 Relay
` (3 preceding siblings ...)
2024-02-21 13:26 ` [PATCH 4/5] iio: buffer: iio: core: " Nuno Sa via B4 Relay
@ 2024-02-21 13:26 ` Nuno Sa via B4 Relay
4 siblings, 0 replies; 8+ messages in thread
From: Nuno Sa via B4 Relay @ 2024-02-21 13:26 UTC (permalink / raw)
To: linux-iio; +Cc: Jonathan Cameron, Lars-Peter Clausen, Nuno Sa
From: Nuno Sa <nuno.sa@analog.com>
Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
---
drivers/iio/inkern.c | 224 ++++++++++++++++-----------------------------------
1 file changed, 71 insertions(+), 153 deletions(-)
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 7a1f6713318a..6a1d6ff8eb97 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2011 Jonathan Cameron
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
@@ -43,30 +44,26 @@ static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
- int i = 0, ret = 0;
+ int i = 0;
struct iio_map_internal *mapi;
if (!maps)
return 0;
- mutex_lock(&iio_map_list_lock);
+ guard(mutex)(&iio_map_list_lock);
while (maps[i].consumer_dev_name) {
mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
if (!mapi) {
- ret = -ENOMEM;
- goto error_ret;
+ iio_map_array_unregister_locked(indio_dev);
+ return -ENOMEM;
}
mapi->map = &maps[i];
mapi->indio_dev = indio_dev;
list_add_tail(&mapi->l, &iio_map_list);
i++;
}
-error_ret:
- if (ret)
- iio_map_array_unregister_locked(indio_dev);
- mutex_unlock(&iio_map_list_lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
@@ -75,13 +72,8 @@ EXPORT_SYMBOL_GPL(iio_map_array_register);
*/
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
- int ret;
-
- mutex_lock(&iio_map_list_lock);
- ret = iio_map_array_unregister_locked(indio_dev);
- mutex_unlock(&iio_map_list_lock);
-
- return ret;
+ guard(mutex)(&iio_map_list_lock);
+ return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
@@ -337,17 +329,17 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
return ERR_PTR(-ENODEV);
/* first find matching entry the channel map */
- mutex_lock(&iio_map_list_lock);
- list_for_each_entry(c_i, &iio_map_list, l) {
- if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
- (channel_name &&
- strcmp(channel_name, c_i->map->consumer_channel) != 0))
- continue;
- c = c_i;
- iio_device_get(c->indio_dev);
- break;
+ scoped_guard(mutex, &iio_map_list_lock) {
+ list_for_each_entry(c_i, &iio_map_list, l) {
+ if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
+ (channel_name &&
+ strcmp(channel_name, c_i->map->consumer_channel) != 0))
+ continue;
+ c = c_i;
+ iio_device_get(c->indio_dev);
+ break;
+ }
}
- mutex_unlock(&iio_map_list_lock);
if (!c)
return ERR_PTR(-ENODEV);
@@ -469,7 +461,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
name = dev_name(dev);
- mutex_lock(&iio_map_list_lock);
+ guard(mutex)(&iio_map_list_lock);
/* first count the matching maps */
list_for_each_entry(c, &iio_map_list, l)
if (name && strcmp(name, c->map->consumer_dev_name) != 0)
@@ -477,17 +469,13 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
else
nummaps++;
- if (nummaps == 0) {
- ret = -ENODEV;
- goto error_ret;
- }
+ if (nummaps == 0)
+ return ERR_PTR(-ENODEV);
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
- if (!chans) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ if (!chans)
+ return ERR_PTR(-ENOMEM);
/* for each map fill in the chans element */
list_for_each_entry(c, &iio_map_list, l) {
@@ -509,7 +497,6 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
ret = -ENODEV;
goto error_free_chans;
}
- mutex_unlock(&iio_map_list_lock);
return chans;
@@ -517,9 +504,6 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
for (i = 0; i < nummaps; i++)
iio_device_put(chans[i].indio_dev);
kfree(chans);
-error_ret:
- mutex_unlock(&iio_map_list_lock);
-
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
@@ -590,38 +574,24 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
@@ -708,20 +678,13 @@ int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
int *processed, unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
- scale);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
+ scale);
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
@@ -729,19 +692,12 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum attribute)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_read(chan, val, val2, attribute);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_read(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
@@ -757,30 +713,25 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
ret = iio_channel_read(chan, val, NULL,
IIO_CHAN_INFO_PROCESSED);
if (ret < 0)
- goto err_unlock;
+ return ret;
+
*val *= scale;
- } else {
- ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
- if (ret < 0)
- goto err_unlock;
- ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
- scale);
+ return ret;
}
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
+ ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
+ if (ret < 0)
+ return ret;
- return ret;
+ return iio_convert_raw_to_processed_unlocked(chan, *val, val, scale);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
@@ -813,19 +764,12 @@ int iio_read_avail_channel_attribute(struct iio_channel *chan,
enum iio_chan_info_enum attribute)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_read_avail(chan, vals, type, length, attribute);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_read_avail(chan, vals, type, length, attribute);
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
@@ -892,20 +836,13 @@ static int iio_channel_read_max(struct iio_channel *chan,
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
int type;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
@@ -955,40 +892,28 @@ static int iio_channel_read_min(struct iio_channel *chan,
int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
int type;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret = 0;
- /* Need to verify underlying driver has not gone away */
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ /* Need to verify underlying driver has not gone away */
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
*type = chan->channel->type;
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
@@ -1003,19 +928,12 @@ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
enum iio_chan_info_enum attribute)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
- mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (!chan->indio_dev->info) {
- ret = -ENODEV;
- goto err_unlock;
- }
+ guard(mutex)(&iio_dev_opaque->info_exist_lock);
+ if (!chan->indio_dev->info)
+ return -ENODEV;
- ret = iio_channel_write(chan, val, val2, attribute);
-err_unlock:
- mutex_unlock(&iio_dev_opaque->info_exist_lock);
-
- return ret;
+ return iio_channel_write(chan, val, val2, attribute);
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
--
2.43.0
^ permalink raw reply related [flat|nested] 8+ messages in thread