* [PATCH 1/2] nullblk: main: use lock guards
2026-04-18 4:38 [PATCH 0/2] nullblk: use lock guards Ricardo H H Kojo
@ 2026-04-18 4:38 ` Ricardo H H Kojo
2026-04-18 13:58 ` Jens Axboe
2026-04-18 4:38 ` [PATCH 2/2] nullblk: zoned: " Ricardo H H Kojo
1 sibling, 1 reply; 4+ messages in thread
From: Ricardo H H Kojo @ 2026-04-18 4:38 UTC (permalink / raw)
To: axboe; +Cc: Ricardo H H Kojo, Ellian Carlos, Gabriel B L de Oliveira,
linux-block
Use guard() and scoped_guard() for handling mutex and spin locks instead of
manually locking and unlocking. This prevents unlocks from being forgotten on
early exits and removes the need for gotos.
Signed-off-by: Ricardo H H Kojo <ricardo.kojo@ime.usp.br>
Co-developed-by: Ellian Carlos <elliancarlos@gmail.com>
Signed-off-by: Ellian Carlos <elliancarlos@gmail.com>
Co-developed-by: Gabriel B L de Oliveira <gabrielblo@ime.usp.br>
Signed-off-by: Gabriel B L de Oliveira <gabrielblo@ime.usp.br>
---
drivers/block/null_blk/main.c | 74 +++++++++++++------------------
drivers/block/null_blk/null_blk.h | 1 +
2 files changed, 32 insertions(+), 43 deletions(-)
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index f8c0fd57e041..fbe34e8a6c93 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -423,9 +423,8 @@ static int nullb_apply_submit_queues(struct nullb_device *dev,
{
int ret;
- mutex_lock(&lock);
+ guard(mutex)(&lock);
ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
- mutex_unlock(&lock);
return ret;
}
@@ -435,9 +434,8 @@ static int nullb_apply_poll_queues(struct nullb_device *dev,
{
int ret;
- mutex_lock(&lock);
+ guard(mutex)(&lock);
ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
- mutex_unlock(&lock);
return ret;
}
@@ -493,15 +491,15 @@ static ssize_t nullb_device_power_store(struct config_item *item,
return ret;
ret = count;
- mutex_lock(&lock);
+ guard(mutex)(&lock);
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
- goto out;
+ return ret;
ret = null_add_dev(dev);
if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
- goto out;
+ return ret;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
@@ -515,8 +513,6 @@ static ssize_t nullb_device_power_store(struct config_item *item,
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
-out:
- mutex_unlock(&lock);
return ret;
}
@@ -707,10 +703,9 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
struct nullb_device *dev = to_nullb_device(item);
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
- mutex_lock(&lock);
+ guard(mutex)(&lock);
dev->power = false;
null_del_dev(dev->nullb);
- mutex_unlock(&lock);
}
nullb_del_fault_config(dev);
config_item_put(item);
@@ -1205,7 +1200,7 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
size_t n = nr_sectors << SECTOR_SHIFT;
size_t temp;
- spin_lock_irq(&nullb->lock);
+ guard(spinlock_irq)(&nullb->lock);
while (n > 0) {
temp = min_t(size_t, n, dev->blocksize);
null_free_sector(nullb, sector, false);
@@ -1214,7 +1209,6 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
sector += temp >> SECTOR_SHIFT;
n -= temp;
}
- spin_unlock_irq(&nullb->lock);
return BLK_STS_OK;
}
@@ -1226,7 +1220,7 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
if (!null_cache_active(nullb))
return 0;
- spin_lock_irq(&nullb->lock);
+ guard(spinlock_irq)(&nullb->lock);
while (true) {
err = null_make_cache_space(nullb,
nullb->dev->cache_size * 1024 * 1024);
@@ -1235,7 +1229,6 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
}
WARN_ON(!radix_tree_empty(&nullb->dev->cache));
- spin_unlock_irq(&nullb->lock);
return errno_to_blk_status(err);
}
@@ -1292,7 +1285,7 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
struct req_iterator iter;
struct bio_vec bvec;
- spin_lock_irq(&nullb->lock);
+ guard(spinlock_irq)(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
if (transferred_bytes + len > max_bytes)
@@ -1307,7 +1300,6 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
if (transferred_bytes >= max_bytes)
break;
}
- spin_unlock_irq(&nullb->lock);
return err;
}
@@ -1592,11 +1584,11 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
int nr = 0;
struct request *rq;
- spin_lock(&nq->poll_lock);
- list_splice_init(&nq->poll_list, &list);
- list_for_each_entry(rq, &list, queuelist)
- blk_mq_set_request_complete(rq);
- spin_unlock(&nq->poll_lock);
+ scoped_guard(spinlock, &nq->poll_lock) {
+ list_splice_init(&nq->poll_list, &list);
+ list_for_each_entry(rq, &list, queuelist)
+ blk_mq_set_request_complete(rq);
+ }
while (!list_empty(&list)) {
struct nullb_cmd *cmd;
@@ -1624,14 +1616,12 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
- spin_lock(&nq->poll_lock);
- /* The request may have completed meanwhile. */
- if (blk_mq_request_completed(rq)) {
- spin_unlock(&nq->poll_lock);
- return BLK_EH_DONE;
+ scoped_guard(spinlock, &nq->poll_lock) {
+ /* The request may have completed meanwhile. */
+ if (blk_mq_request_completed(rq))
+ return BLK_EH_DONE;
+ list_del_init(&rq->queuelist);
}
- list_del_init(&rq->queuelist);
- spin_unlock(&nq->poll_lock);
}
pr_info("rq %p timed out\n", rq);
@@ -1692,9 +1682,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
if (is_poll) {
- spin_lock(&nq->poll_lock);
- list_add_tail(&rq->queuelist, &nq->poll_list);
- spin_unlock(&nq->poll_lock);
+ scoped_guard(spinlock, &nq->poll_lock) {
+ list_add_tail(&rq->queuelist, &nq->poll_list);
+ }
return BLK_STS_OK;
}
if (cmd->fake_timeout)
@@ -2081,14 +2071,13 @@ static struct nullb *null_find_dev_by_name(const char *name)
{
struct nullb *nullb = NULL, *nb;
- mutex_lock(&lock);
+ guard(mutex)(&lock);
list_for_each_entry(nb, &nullb_list, list) {
if (strcmp(nb->disk_name, name) == 0) {
nullb = nb;
break;
}
}
- mutex_unlock(&lock);
return nullb;
}
@@ -2101,10 +2090,9 @@ static int null_create_dev(void)
dev = null_alloc_dev();
if (!dev)
return -ENOMEM;
-
- mutex_lock(&lock);
- ret = null_add_dev(dev);
- mutex_unlock(&lock);
+ scoped_guard(mutex, &lock) {
+ ret = null_add_dev(dev);
+ }
if (ret) {
null_free_dev(dev);
return ret;
@@ -2202,12 +2190,12 @@ static void __exit null_exit(void)
unregister_blkdev(null_major, "nullb");
- mutex_lock(&lock);
- while (!list_empty(&nullb_list)) {
- nullb = list_entry(nullb_list.next, struct nullb, list);
- null_destroy_dev(nullb);
+ scoped_guard(mutex, &lock) {
+ while (!list_empty(&nullb_list)) {
+ nullb = list_entry(nullb_list.next, struct nullb, list);
+ null_destroy_dev(nullb);
+ }
}
- mutex_unlock(&lock);
if (tag_set.ops)
blk_mq_free_tag_set(&tag_set);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 6c4c4bbe7dad..c2bb085d3582 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -14,6 +14,7 @@
#include <linux/fault-inject.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/cleanup.h>
struct nullb_cmd {
blk_status_t error;
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread* [PATCH 2/2] nullblk: zoned: use lock guards
2026-04-18 4:38 [PATCH 0/2] nullblk: use lock guards Ricardo H H Kojo
2026-04-18 4:38 ` [PATCH 1/2] nullblk: main: " Ricardo H H Kojo
@ 2026-04-18 4:38 ` Ricardo H H Kojo
1 sibling, 0 replies; 4+ messages in thread
From: Ricardo H H Kojo @ 2026-04-18 4:38 UTC (permalink / raw)
To: axboe; +Cc: Ricardo H H Kojo, Ellian Carlos, Gabriel B L de Oliveira,
linux-block
Use guard() and scoped_guard() for handling spin locks instead of manually
locking and unlocking. This prevents unlocks from being forgotten on early exits.
Signed-off-by: Ricardo H H Kojo <ricardo.kojo@ime.usp.br>
Co-developed-by: Ellian Carlos <elliancarlos@gmail.com>
Signed-off-by: Ellian Carlos <elliancarlos@gmail.com>
Co-developed-by: Gabriel B L de Oliveira <gabrielblo@ime.usp.br>
Signed-off-by: Gabriel B L de Oliveira <gabrielblo@ime.usp.br>
---
drivers/block/null_blk/zoned.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 384bdce6a9b7..6f59531e9b1d 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -471,13 +471,12 @@ static blk_status_t null_open_zone(struct nullb_device *dev,
}
if (dev->need_zone_res_mgmt) {
- spin_lock(&dev->zone_res_lock);
+ guard(spinlock)(&dev->zone_res_lock);
switch (zone->cond) {
case BLK_ZONE_COND_EMPTY:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK) {
- spin_unlock(&dev->zone_res_lock);
return ret;
}
break;
@@ -487,7 +486,6 @@ static blk_status_t null_open_zone(struct nullb_device *dev,
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK) {
- spin_unlock(&dev->zone_res_lock);
return ret;
}
dev->nr_zones_closed--;
@@ -497,8 +495,6 @@ static blk_status_t null_open_zone(struct nullb_device *dev,
}
dev->nr_zones_exp_open++;
-
- spin_unlock(&dev->zone_res_lock);
}
zone->cond = BLK_ZONE_COND_EXP_OPEN;
@@ -526,7 +522,7 @@ static blk_status_t null_close_zone(struct nullb_device *dev,
}
if (dev->need_zone_res_mgmt) {
- spin_lock(&dev->zone_res_lock);
+ guard(spinlock)(&dev->zone_res_lock);
switch (zone->cond) {
case BLK_ZONE_COND_IMP_OPEN:
@@ -542,7 +538,6 @@ static blk_status_t null_close_zone(struct nullb_device *dev,
if (zone->wp > zone->start)
dev->nr_zones_closed++;
- spin_unlock(&dev->zone_res_lock);
}
if (zone->wp == zone->start)
@@ -562,17 +557,15 @@ static blk_status_t null_finish_zone(struct nullb_device *dev,
return BLK_STS_IOERR;
if (dev->need_zone_res_mgmt) {
- spin_lock(&dev->zone_res_lock);
+ guard(spinlock)(&dev->zone_res_lock);
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* Finish operation on full is not an error */
- spin_unlock(&dev->zone_res_lock);
return BLK_STS_OK;
case BLK_ZONE_COND_EMPTY:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK) {
- spin_unlock(&dev->zone_res_lock);
return ret;
}
break;
@@ -585,13 +578,11 @@ static blk_status_t null_finish_zone(struct nullb_device *dev,
case BLK_ZONE_COND_CLOSED:
ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK) {
- spin_unlock(&dev->zone_res_lock);
return ret;
}
dev->nr_zones_closed--;
break;
default:
- spin_unlock(&dev->zone_res_lock);
return BLK_STS_IOERR;
}
@@ -611,7 +602,7 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
return BLK_STS_IOERR;
if (dev->need_zone_res_mgmt) {
- spin_lock(&dev->zone_res_lock);
+ guard(spinlock)(&dev->zone_res_lock);
switch (zone->cond) {
case BLK_ZONE_COND_IMP_OPEN:
@@ -627,11 +618,9 @@ static blk_status_t null_reset_zone(struct nullb_device *dev,
case BLK_ZONE_COND_FULL:
break;
default:
- spin_unlock(&dev->zone_res_lock);
return BLK_STS_IOERR;
}
- spin_unlock(&dev->zone_res_lock);
}
zone->cond = BLK_ZONE_COND_EMPTY;
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread