linux-fsdevel.vger.kernel.org archive mirror
From: Ye Bin <yebin@huaweicloud.com>
To: viro@zeniv.linux.org.uk, brauner@kernel.org, jack@suse.cz,
	linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, yebin10@huawei.com
Subject: [PATCH v2 2/3] sysctl: add support for drop_caches for individual filesystem
Date: Mon, 17 Nov 2025 19:27:34 +0800
Message-ID: <20251117112735.4170831-3-yebin@huaweicloud.com>
In-Reply-To: <20251117112735.4170831-1-yebin@huaweicloud.com>

From: Ye Bin <yebin10@huawei.com>

When a kernel module keeps files open, the references it holds can
prevent a filesystem from being unmounted. Analyzing such unmount
failures requires reclaiming the dentries of a single filesystem, but
today only global reclaim via drop_caches is available; there is no way
to drop the caches of one filesystem separately. Besides helping with
problem localization, a per-filesystem control also gives users a
finer-grained pagecache/dentry reclaim mechanism.

This patch adds support for reclaiming the pagecache and dentry/inode
caches of an individual filesystem.

Signed-off-by: Ye Bin <yebin10@huawei.com>
---
 fs/drop_caches.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 127 insertions(+)

diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 49f56a598ecb..3c7e624129ec 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -11,6 +11,10 @@
 #include <linux/sysctl.h>
 #include <linux/gfp.h>
 #include <linux/swap.h>
+#include <linux/ptrace.h>
+#include <asm/syscall.h>
+#include <linux/task_work.h>
+#include <linux/namei.h>
 #include "internal.h"
 
 /* A global variable is a bit ugly, but it keeps the code simple */
@@ -78,6 +82,124 @@ static int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
 	return 0;
 }
 
+struct drop_fs_caches_work {
+	struct callback_head task_work;
+	dev_t dev;
+	char *path;
+	unsigned int ctl;
+};
+
+static void drop_fs_caches(struct callback_head *twork)
+{
+	int ret;
+	struct super_block *sb;
+	static bool suppress;
+	struct drop_fs_caches_work *work = container_of(twork,
+			struct drop_fs_caches_work, task_work);
+	unsigned int ctl = work->ctl;
+	dev_t dev = work->dev;
+
+	if (work->path) {
+		struct path path;
+
+		ret = kern_path(work->path, LOOKUP_FOLLOW, &path);
+		if (ret) {
+			syscall_set_return_value(current,
+						 current_pt_regs(),
+						 0, ret);
+			goto out;
+		}
+		dev = path.dentry->d_sb->s_dev;
+		/* Put the path so this file's dentry and inode can be reclaimed as well */
+		path_put(&path);
+	}
+
+	sb = user_get_super(dev, false);
+	if (!sb) {
+		syscall_set_return_value(current, current_pt_regs(), 0,
+					 -EINVAL);
+		goto out;
+	}
+
+	if (ctl & BIT(0)) {
+		lru_add_drain_all();
+		drop_pagecache_sb(sb, NULL);
+		count_vm_event(DROP_PAGECACHE);
+	}
+
+	if (ctl & BIT(1)) {
+		reclaim_dcache_sb(sb);
+		reclaim_icache_sb(sb);
+		count_vm_event(DROP_SLAB);
+	}
+
+	if (!READ_ONCE(suppress)) {
+		pr_info("%s (%d): %s: %d %u:%u\n", current->comm,
+			task_pid_nr(current), __func__, ctl,
+			MAJOR(sb->s_dev), MINOR(sb->s_dev));
+
+		if (ctl & BIT(2))
+			WRITE_ONCE(suppress, true);
+	}
+
+	drop_super(sb);
+out:
+	kfree(work->path);
+	kfree(work);
+}
+
+static int drop_fs_caches_sysctl_handler(const struct ctl_table *table,
+					 int write, void *buffer,
+					 size_t *length, loff_t *ppos)
+{
+	struct drop_fs_caches_work *work = NULL;
+	unsigned int major, minor;
+	unsigned int ctl;
+	int ret;
+	char *path = NULL;
+
+	if (!write)
+		return 0;
+
+	if (sscanf(buffer, "%u %u:%u", &ctl, &major, &minor) != 3) {
+		path = kstrdup(buffer, GFP_NOFS);
+		if (!path) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (sscanf(buffer, "%u %s", &ctl, path) != 2) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (ctl < 1 || ctl > 7) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	init_task_work(&work->task_work, drop_fs_caches);
+	if (!path)
+		work->dev = MKDEV(major, minor);
+	work->path = path;
+	work->ctl = ctl;
+	ret = task_work_add(current, &work->task_work, TWA_RESUME);
+out:
+	if (ret) {
+		kfree(path);
+		kfree(work);
+	}
+
+	return ret;
+}
+
 static const struct ctl_table drop_caches_table[] = {
 	{
 		.procname	= "drop_caches",
@@ -88,6 +210,11 @@ static const struct ctl_table drop_caches_table[] = {
 		.extra1		= SYSCTL_ONE,
 		.extra2		= SYSCTL_FOUR,
 	},
+	{
+		.procname	= "drop_fs_caches",
+		.mode		= 0200,
+		.proc_handler	= drop_fs_caches_sysctl_handler,
+	},
 };
 
 static int __init init_vm_drop_caches_sysctls(void)
-- 
2.34.1



Thread overview: 6+ messages
2025-11-17 11:27 [PATCH v2 0/3] add support for drop_caches for individual filesystem Ye Bin
2025-11-17 11:27 ` [PATCH v2 1/3] vfs: introduce reclaim_icache_sb() and reclaim_dcache_sb() helper Ye Bin
2025-11-17 11:27 ` Ye Bin [this message]
2025-11-17 13:57   ` [PATCH v2 2/3] sysctl: add support for drop_caches for individual filesystem kernel test robot
2025-11-17 14:07   ` kernel test robot
2025-11-17 11:27 ` [PATCH v2 3/3] Documentation: add instructions for using 'drop_fs_caches sysctl' sysctl Ye Bin
