From: Ye Bin <yebin@huaweicloud.com>
To: viro@zeniv.linux.org.uk, brauner@kernel.org, jack@suse.cz,
linux-fsdevel@vger.kernel.org
Cc: akpm@linux-foundation.org, david@fromorbit.com,
zhengqi.arch@bytedance.com, roman.gushchin@linux.dev,
muchun.song@linux.dev, linux-mm@kvack.org, yebin10@huawei.com
Subject: [PATCH v3 2/3] sysctl: add support for drop_caches for individual filesystem
Date: Fri, 27 Feb 2026 10:55:47 +0800 [thread overview]
Message-ID: <20260227025548.2252380-3-yebin@huaweicloud.com> (raw)
In-Reply-To: <20260227025548.2252380-1-yebin@huaweicloud.com>
From: Ye Bin <yebin10@huawei.com>
In order to better analyze cases where a filesystem cannot be unmounted
because a kernel module holds files open, it is necessary to be able to
reclaim dentries for a single filesystem. Currently, apart from global
dentry reclaim, reclaiming dentries for one filesystem separately is not
supported.
This feature is useful in problem-localization scenarios. At the same
time, it also provides users with a more fine-grained pagecache/dentry
reclaim mechanism.
This patch adds support for reclaiming the pagecache and dentries of an
individual filesystem.
Signed-off-by: Ye Bin <yebin10@huawei.com>
---
fs/drop_caches.c | 125 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 125 insertions(+)
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 49f56a598ecb..0cd8ad9df07a 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -11,6 +11,8 @@
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include <linux/swap.h>
+#include <linux/task_work.h>
+#include <linux/namei.h>
#include "internal.h"
/* A global variable is a bit ugly, but it keeps the code simple */
@@ -78,6 +80,124 @@ static int drop_caches_sysctl_handler(const struct ctl_table *table, int write,
return 0;
}
+/*
+ * Deferred drop-caches request, queued as task_work by the sysctl write
+ * handler and executed by drop_fs_caches() when the writing task returns
+ * to user space.  Freed by the callback.
+ */
+struct drop_fs_caches_work {
+	struct callback_head task_work;	/* task_work hook */
+	dev_t dev;			/* target filesystem's device number */
+	char *path;			/* or a path on the filesystem (kmalloc'd, may be NULL) */
+	unsigned int ctl;		/* bit 0: pagecache, bit 1: dentries/inodes, bit 2: suppress log */
+};
+
+/*
+ * task_work callback: drop caches for a single filesystem.
+ *
+ * The target is identified either by work->dev, or — when work->path is
+ * set — by resolving that path and using its superblock's device number.
+ * work->ctl is a bitmask:
+ *   bit 0 - drop clean pagecache
+ *   bit 1 - drop dentries and inodes
+ *   bit 2 - suppress the informational log line from then on
+ * Always frees @work (and work->path) before returning.
+ */
+static void drop_fs_caches(struct callback_head *twork)
+{
+	int ret;
+	struct super_block *sb;
+	static bool suppress;	/* sticky once set, mirroring drop_caches' stfu logic */
+	struct drop_fs_caches_work *work = container_of(twork,
+				struct drop_fs_caches_work, task_work);
+	unsigned int ctl = work->ctl;
+	dev_t dev = work->dev;
+
+	if (work->path) {
+		struct path path;
+
+		ret = kern_path(work->path, LOOKUP_FOLLOW, &path);
+		if (ret) {
+			pr_err("%s (%d): %s: failed to get path(%s) %d\n",
+				current->comm, task_pid_nr(current),
+				__func__, work->path, ret);
+			goto out;
+		}
+		dev = path.dentry->d_sb->s_dev;
+		/*
+		 * Drop our reference right away so the looked-up dentry and
+		 * inode themselves remain reclaimable below.
+		 */
+		path_put(&path);
+	}
+
+	/* Takes an sb reference; NULL if nothing is mounted on dev. */
+	sb = user_get_super(dev, false);
+	if (!sb) {
+		pr_err("%s (%d): %s: failed to get dev(%u:%u)'s sb\n",
+			current->comm, task_pid_nr(current), __func__,
+			MAJOR(dev), MINOR(dev));
+		goto out;
+	}
+
+	if (ctl & BIT(0)) {
+		/* Flush per-CPU LRU batches, then drop clean page cache. */
+		lru_add_drain_all();
+		drop_pagecache_sb(sb, NULL);
+		count_vm_event(DROP_PAGECACHE);
+	}
+
+	if (ctl & BIT(1)) {
+		/* Shrink this superblock's dentry and inode caches. */
+		drop_sb_dentry_inode(sb);
+		count_vm_event(DROP_SLAB);
+	}
+
+	if (!READ_ONCE(suppress)) {
+		/* ctl is unsigned: print it with %u, not %d. */
+		pr_info("%s (%d): %s: %u %u:%u\n", current->comm,
+			task_pid_nr(current), __func__, ctl,
+			MAJOR(sb->s_dev), MINOR(sb->s_dev));
+
+		if (ctl & BIT(2))
+			WRITE_ONCE(suppress, true);
+	}
+
+	drop_super(sb);
+out:
+	kfree(work->path);
+	kfree(work);
+}
+
+/*
+ * Handler for /proc/sys/vm/drop_fs_caches (write-only).
+ *
+ * Accepted input formats:
+ *   "<ctl> <major>:<minor>"  - target filesystem by device number
+ *   "<ctl> <path>"           - target filesystem by a path on it
+ * where <ctl> is a bitmask in [1, 7] (bit 0: pagecache, bit 1:
+ * dentries/inodes, bit 2: suppress logging).
+ *
+ * The actual work is deferred via task_work so it runs when this task
+ * returns to user space; on success, ownership of the allocated work
+ * item (and path string) passes to drop_fs_caches().
+ */
+static int drop_fs_caches_sysctl_handler(const struct ctl_table *table,
+					 int write, void *buffer,
+					 size_t *length, loff_t *ppos)
+{
+	struct drop_fs_caches_work *work = NULL;
+	unsigned int major, minor;
+	unsigned int ctl;
+	int ret;
+	char *input;
+	char *path = NULL;
+
+	if (!write)
+		return 0;
+
+	/*
+	 * Parse a bounded, NUL-terminated copy: the sysctl buffer is sized
+	 * by *length and is not guaranteed to be NUL-terminated, so never
+	 * run sscanf()/strlen() on it directly.
+	 */
+	input = kstrndup(buffer, *length, GFP_KERNEL);
+	if (!input)
+		return -ENOMEM;
+
+	if (sscanf(input, "%u %u:%u", &ctl, &major, &minor) != 3) {
+		/*
+		 * Fall back to "<ctl> <path>".  The destination is as large
+		 * as the source copy, so the %s scan cannot overflow it.
+		 */
+		path = kzalloc(strlen(input) + 1, GFP_KERNEL);
+		if (!path) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (sscanf(input, "%u %s", &ctl, path) != 2) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* Only combinations of bits 0-2 are meaningful. */
+	if (ctl < 1 || ctl > 7) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (!work) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	init_task_work(&work->task_work, drop_fs_caches);
+	if (!path)
+		work->dev = MKDEV(major, minor);
+	work->path = path;
+	work->ctl = ctl;
+	/* Fails only if the task is already exiting. */
+	ret = task_work_add(current, &work->task_work, TWA_RESUME);
+out:
+	kfree(input);
+	if (ret) {
+		kfree(path);
+		kfree(work);
+	}
+
+	return ret;
+}
+
static const struct ctl_table drop_caches_table[] = {
{
.procname = "drop_caches",
@@ -88,6 +208,11 @@ static const struct ctl_table drop_caches_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = SYSCTL_FOUR,
},
+ {
+ .procname = "drop_fs_caches",
+ .mode = 0200,
+ .proc_handler = drop_fs_caches_sysctl_handler,
+ },
};
static int __init init_vm_drop_caches_sysctls(void)
--
2.34.1
next prev parent reply other threads:[~2026-02-27 2:57 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-27 2:55 [PATCH v3 0/3] " Ye Bin
2026-02-27 2:55 ` [PATCH v3 1/3] mm/vmscan: introduce drop_sb_dentry_inode() helper Ye Bin
2026-02-27 2:55 ` Ye Bin [this message]
2026-02-27 2:55 ` [PATCH v3 3/3] Documentation: add instructions for using 'drop_fs_caches sysctl' sysctl Ye Bin
2026-02-27 3:31 ` [PATCH v3 0/3] add support for drop_caches for individual filesystem Muchun Song
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260227025548.2252380-3-yebin@huaweicloud.com \
--to=yebin@huaweicloud.com \
--cc=akpm@linux-foundation.org \
--cc=brauner@kernel.org \
--cc=david@fromorbit.com \
--cc=jack@suse.cz \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=muchun.song@linux.dev \
--cc=roman.gushchin@linux.dev \
--cc=viro@zeniv.linux.org.uk \
--cc=yebin10@huawei.com \
--cc=zhengqi.arch@bytedance.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox