public inbox for linux-fsdevel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] fs: hide file and bfile caches behind runtime const machinery
@ 2026-03-28 17:37 Mateusz Guzik
  2026-03-30  8:47 ` Jan Kara
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Mateusz Guzik @ 2026-03-28 17:37 UTC (permalink / raw)
  To: brauner; +Cc: viro, jack, linux-kernel, linux-fsdevel, Mateusz Guzik

Hide the filp and bfilp slab cache pointers behind the runtime const
machinery, so that loads of these pointers on alloc/free hot paths get
patched into immediates at boot instead of being fetched from memory.

While here, s/cachep/cache/ for consistency with namei and dentry caches.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
---
 fs/file.c                         |  2 +-
 fs/file_table.c                   | 31 +++++++++++++++++++------------
 include/asm-generic/vmlinux.lds.h |  4 +++-
 3 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/fs/file.c b/fs/file.c
index 384c83ce768d..2c81c0b162d0 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -200,7 +200,7 @@ static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
 	/*
 	 * Check if the allocation size would exceed INT_MAX. kvmalloc_array()
 	 * and kvmalloc() will warn if the allocation size is greater than
-	 * INT_MAX, as filp_cachep objects are not __GFP_NOWARN.
+	 * INT_MAX, as filp_cache objects are not __GFP_NOWARN.
 	 *
 	 * This can happen when sysctl_nr_open is set to a very high value and
 	 * a process tries to use a file descriptor near that limit. For example,
diff --git a/fs/file_table.c b/fs/file_table.c
index aaa5faaace1e..c40ec1be2899 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -30,6 +30,8 @@
 
 #include <linux/atomic.h>
 
+#include <asm/runtime-const.h>
+
 #include "internal.h"
 
 /* sysctl tunables... */
@@ -38,8 +40,10 @@ static struct files_stat_struct files_stat = {
 };
 
 /* SLAB cache for file structures */
-static struct kmem_cache *filp_cachep __ro_after_init;
-static struct kmem_cache *bfilp_cachep __ro_after_init;
+static struct kmem_cache *__filp_cache __ro_after_init;
+#define filp_cache runtime_const_ptr(__filp_cache)
+static struct kmem_cache *__bfilp_cache __ro_after_init;
+#define bfilp_cache runtime_const_ptr(__bfilp_cache)
 
 static struct percpu_counter nr_files __cacheline_aligned_in_smp;
 
@@ -74,9 +78,9 @@ static inline void file_free(struct file *f)
 	put_cred(f->f_cred);
 	if (unlikely(f->f_mode & FMODE_BACKING)) {
 		path_put(backing_file_user_path(f));
-		kmem_cache_free(bfilp_cachep, backing_file(f));
+		kmem_cache_free(bfilp_cache, backing_file(f));
 	} else {
-		kmem_cache_free(filp_cachep, f);
+		kmem_cache_free(filp_cache, f);
 	}
 }
 
@@ -234,13 +238,13 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
 			goto over;
 	}
 
-	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+	f = kmem_cache_alloc(filp_cache, GFP_KERNEL);
 	if (unlikely(!f))
 		return ERR_PTR(-ENOMEM);
 
 	error = init_file(f, flags, cred);
 	if (unlikely(error)) {
-		kmem_cache_free(filp_cachep, f);
+		kmem_cache_free(filp_cache, f);
 		return ERR_PTR(error);
 	}
 
@@ -268,13 +272,13 @@ struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
 	struct file *f;
 	int error;
 
-	f = kmem_cache_alloc(filp_cachep, GFP_KERNEL);
+	f = kmem_cache_alloc(filp_cache, GFP_KERNEL);
 	if (unlikely(!f))
 		return ERR_PTR(-ENOMEM);
 
 	error = init_file(f, flags, cred);
 	if (unlikely(error)) {
-		kmem_cache_free(filp_cachep, f);
+		kmem_cache_free(filp_cache, f);
 		return ERR_PTR(error);
 	}
 
@@ -295,13 +299,13 @@ struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
 	struct backing_file *ff;
 	int error;
 
-	ff = kmem_cache_alloc(bfilp_cachep, GFP_KERNEL);
+	ff = kmem_cache_alloc(bfilp_cache, GFP_KERNEL);
 	if (unlikely(!ff))
 		return ERR_PTR(-ENOMEM);
 
 	error = init_file(&ff->file, flags, cred);
 	if (unlikely(error)) {
-		kmem_cache_free(bfilp_cachep, ff);
+		kmem_cache_free(bfilp_cache, ff);
 		return ERR_PTR(error);
 	}
 
@@ -593,14 +597,17 @@ void __init files_init(void)
 		.freeptr_offset = offsetof(struct file, f_freeptr),
 	};
 
-	filp_cachep = kmem_cache_create("filp", sizeof(struct file), &args,
+	__filp_cache = kmem_cache_create("filp", sizeof(struct file), &args,
 				SLAB_HWCACHE_ALIGN | SLAB_PANIC |
 				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
+	runtime_const_init(ptr, __filp_cache);
 
 	args.freeptr_offset = offsetof(struct backing_file, bf_freeptr);
-	bfilp_cachep = kmem_cache_create("bfilp", sizeof(struct backing_file),
+	__bfilp_cache = kmem_cache_create("bfilp", sizeof(struct backing_file),
 				&args, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
 				SLAB_ACCOUNT | SLAB_TYPESAFE_BY_RCU);
+	runtime_const_init(ptr, __bfilp_cache);
+
 	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 }
 
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1e1580febe4b..4f8e734c4336 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -975,7 +975,9 @@
 		RUNTIME_CONST(shift, d_hash_shift)			\
 		RUNTIME_CONST(ptr, dentry_hashtable)			\
 		RUNTIME_CONST(ptr, __dentry_cache)			\
-		RUNTIME_CONST(ptr, __names_cache)
+		RUNTIME_CONST(ptr, __names_cache)			\
+		RUNTIME_CONST(ptr, __filp_cache)			\
+		RUNTIME_CONST(ptr, __bfilp_cache)
 
 /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
 #define KUNIT_TABLE()							\
-- 
2.48.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2026-04-06 21:45 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-28 17:37 [PATCH] fs: hide file and bfile caches behind runtime const machinery Mateusz Guzik
2026-03-30  8:47 ` Jan Kara
2026-03-31  9:44 ` Christian Brauner
2026-04-06 21:16 ` Al Viro
2026-04-06 21:45   ` Mateusz Guzik

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox