From: Yufan Chen <yufan.chen@linux.dev>
To: Mark Fasheh <mark@fasheh.com>, Joel Becker <jlbec@evilplan.org>,
Joseph Qi <joseph.qi@linux.alibaba.com>,
ocfs2-devel@lists.linux.dev, linux-kernel@vger.kernel.org
Cc: yufan.chen@linux.dev, Yufan Chen <ericterminal@gmail.com>
Subject: [PATCH] ocfs2/heartbeat: fix slot mapping rollback leaks on error paths
Date: Sat, 28 Mar 2026 17:23:39 +0800 [thread overview]
Message-ID: <20260328092339.75306-1-yufan.chen@linux.dev> (raw)
From: Yufan Chen <ericterminal@gmail.com>
o2hb_map_slot_data() allocated hr_tmp_block, hr_slots, hr_slot_data, and pages in stages but returned directly on allocation failures without unwinding previously allocated resources. Under repeated allocation failures this could leak memory and increase pressure toward OOM.
o2hb_region_dev_store() also failed to roll back slot mapping resources when setup aborted, leaving stale allocations behind across retry attempts.
Introduce o2hb_unmap_slot_data() as a single reverse-order cleanup helper, switch o2hb_map_slot_data() to a centralized goto-based error exit, and call the same rollback path from o2hb_region_dev_store() after stopping a possibly started heartbeat thread. This ensures failed setup fully releases resources and remains safely retryable.
Signed-off-by: Yufan Chen <ericterminal@gmail.com>
---
fs/ocfs2/cluster/heartbeat.c | 64 +++++++++++++++++++++++++-----------
1 file changed, 45 insertions(+), 19 deletions(-)
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index fe1949578..2f82040f4 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1488,18 +1488,10 @@ static struct o2hb_region *to_o2hb_region(struct config_item *item)
return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
}
-/* drop_item only drops its ref after killing the thread, nothing should
- * be using the region anymore. this has to clean up any state that
- * attributes might have built up. */
-static void o2hb_region_release(struct config_item *item)
+static void o2hb_unmap_slot_data(struct o2hb_region *reg)
{
int i;
struct page *page;
- struct o2hb_region *reg = to_o2hb_region(item);
-
- mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
-
- kfree(reg->hr_tmp_block);
if (reg->hr_slot_data) {
for (i = 0; i < reg->hr_num_pages; i++) {
@@ -1508,13 +1500,32 @@ static void o2hb_region_release(struct config_item *item)
__free_page(page);
}
kfree(reg->hr_slot_data);
+ reg->hr_slot_data = NULL;
}
+ kfree(reg->hr_slots);
+ reg->hr_slots = NULL;
+
+ kfree(reg->hr_tmp_block);
+ reg->hr_tmp_block = NULL;
+ reg->hr_num_pages = 0;
+}
+
+/* drop_item only drops its ref after killing the thread, nothing should
+ * be using the region anymore. this has to clean up any state that
+ * attributes might have built up.
+ */
+static void o2hb_region_release(struct config_item *item)
+{
+ struct o2hb_region *reg = to_o2hb_region(item);
+
+ mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
+
+ o2hb_unmap_slot_data(reg);
+
if (reg->hr_bdev_file)
fput(reg->hr_bdev_file);
- kfree(reg->hr_slots);
-
debugfs_remove_recursive(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
@@ -1667,6 +1678,7 @@ static void o2hb_init_region_params(struct o2hb_region *reg)
static int o2hb_map_slot_data(struct o2hb_region *reg)
{
int i, j;
+ int ret = -ENOMEM;
unsigned int last_slot;
unsigned int spp = reg->hr_slots_per_page;
struct page *page;
@@ -1674,14 +1686,14 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
struct o2hb_disk_slot *slot;
reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
- if (reg->hr_tmp_block == NULL)
- return -ENOMEM;
+ if (!reg->hr_tmp_block)
+ goto out;
reg->hr_slots = kzalloc_objs(struct o2hb_disk_slot, reg->hr_blocks);
- if (reg->hr_slots == NULL)
- return -ENOMEM;
+ if (!reg->hr_slots)
+ goto out;
- for(i = 0; i < reg->hr_blocks; i++) {
+ for (i = 0; i < reg->hr_blocks; i++) {
slot = &reg->hr_slots[i];
slot->ds_node_num = i;
INIT_LIST_HEAD(&slot->ds_live_item);
@@ -1695,12 +1707,12 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
reg->hr_slot_data = kzalloc_objs(struct page *, reg->hr_num_pages);
if (!reg->hr_slot_data)
- return -ENOMEM;
+ goto out;
- for(i = 0; i < reg->hr_num_pages; i++) {
+ for (i = 0; i < reg->hr_num_pages; i++) {
page = alloc_page(GFP_KERNEL);
if (!page)
- return -ENOMEM;
+ goto out;
reg->hr_slot_data[i] = page;
@@ -1720,6 +1732,10 @@ static int o2hb_map_slot_data(struct o2hb_region *reg)
}
return 0;
+
+out:
+ o2hb_unmap_slot_data(reg);
+ return ret;
}
/* Read in all the slots available and populate the tracking
@@ -1903,6 +1919,16 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
out3:
if (ret < 0) {
+ spin_lock(&o2hb_live_lock);
+ hb_task = reg->hr_task;
+ reg->hr_task = NULL;
+ spin_unlock(&o2hb_live_lock);
+
+ if (hb_task)
+ kthread_stop(hb_task);
+
+ o2hb_unmap_slot_data(reg);
+
fput(reg->hr_bdev_file);
reg->hr_bdev_file = NULL;
}
--
2.47.3
next reply other threads:[~2026-03-28 9:23 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-28 9:23 Yufan Chen [this message]
2026-03-30 2:14 ` [PATCH] ocfs2/heartbeat: fix slot mapping rollback leaks on error paths Joseph Qi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260328092339.75306-1-yufan.chen@linux.dev \
--to=yufan.chen@linux.dev \
--cc=ericterminal@gmail.com \
--cc=jlbec@evilplan.org \
--cc=joseph.qi@linux.alibaba.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mark@fasheh.com \
--cc=ocfs2-devel@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox