* [PATCH 1/2] mm: shmem: refactor thpsize_shmem_enabled_store() with sysfs_match_string()
@ 2026-05-12 12:05 ranxiaokai627
2026-05-12 12:05 ` [PATCH 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
0 siblings, 1 reply; 2+ messages in thread
From: ranxiaokai627 @ 2026-05-12 12:05 UTC (permalink / raw)
To: hughd, baolin.wang, akpm
Cc: linux-mm, linux-kernel, ran.xiaokai, ranxiaokai627
From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
Inspired by commit 82d9ff648c6c ("mm: huge_memory: refactor
anon_enabled_store() with set_anon_enabled_mode()"), refactor
thpsize_shmem_enabled_store() using sysfs_match_string().
This eliminates the duplicated spin_lock()/spin_unlock() and
set_bit()/clear_bit() calls across all branches, reducing code duplication.
Tested with selftests (./run_kselftest.sh -t mm:ksft_thp.sh);
all test cases passed.
Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
---
mm/shmem.c | 88 ++++++++++++++++++++++++++----------------------------
1 file changed, 43 insertions(+), 45 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 3b5dc21b323c..0cc7872cc576 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5526,6 +5526,29 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
static DEFINE_SPINLOCK(huge_shmem_orders_lock);
+enum huge_shmem_enabled_mode {
+ HUGE_SHMEM_ENABLED_ALWAYS = 0,
+ HUGE_SHMEM_ENABLED_INHERIT,
+ HUGE_SHMEM_ENABLED_WITHIN_SIZE,
+ HUGE_SHMEM_ENABLED_ADVISE,
+ HUGE_SHMEM_ENABLED_NEVER,
+};
+
+static const char * const huge_shmem_enabled_mode_strings[] = {
+ [HUGE_SHMEM_ENABLED_ALWAYS] = "always",
+ [HUGE_SHMEM_ENABLED_INHERIT] = "inherit",
+ [HUGE_SHMEM_ENABLED_WITHIN_SIZE] = "within_size",
+ [HUGE_SHMEM_ENABLED_ADVISE] = "advise",
+ [HUGE_SHMEM_ENABLED_NEVER] = "never",
+};
+
+static unsigned long * const huge_shmem_orders_by_mode[] = {
+ [HUGE_SHMEM_ENABLED_ALWAYS] = &huge_shmem_orders_always,
+ [HUGE_SHMEM_ENABLED_INHERIT] = &huge_shmem_orders_inherit,
+ [HUGE_SHMEM_ENABLED_WITHIN_SIZE] = &huge_shmem_orders_within_size,
+ [HUGE_SHMEM_ENABLED_ADVISE] = &huge_shmem_orders_madvise,
+};
+
static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -5551,57 +5574,32 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
const char *buf, size_t count)
{
int order = to_thpsize(kobj)->order;
+ int mode, m;
ssize_t ret = count;
+ int err;
- if (sysfs_streq(buf, "always")) {
- spin_lock(&huge_shmem_orders_lock);
- clear_bit(order, &huge_shmem_orders_inherit);
- clear_bit(order, &huge_shmem_orders_madvise);
- clear_bit(order, &huge_shmem_orders_within_size);
- set_bit(order, &huge_shmem_orders_always);
- spin_unlock(&huge_shmem_orders_lock);
- } else if (sysfs_streq(buf, "inherit")) {
- /* Do not override huge allocation policy with non-PMD sized mTHP */
- if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
- return -EINVAL;
+ mode = sysfs_match_string(huge_shmem_enabled_mode_strings, buf);
+ if (mode < 0)
+ return -EINVAL;
- spin_lock(&huge_shmem_orders_lock);
- clear_bit(order, &huge_shmem_orders_always);
- clear_bit(order, &huge_shmem_orders_madvise);
- clear_bit(order, &huge_shmem_orders_within_size);
- set_bit(order, &huge_shmem_orders_inherit);
- spin_unlock(&huge_shmem_orders_lock);
- } else if (sysfs_streq(buf, "within_size")) {
- spin_lock(&huge_shmem_orders_lock);
- clear_bit(order, &huge_shmem_orders_always);
- clear_bit(order, &huge_shmem_orders_inherit);
- clear_bit(order, &huge_shmem_orders_madvise);
- set_bit(order, &huge_shmem_orders_within_size);
- spin_unlock(&huge_shmem_orders_lock);
- } else if (sysfs_streq(buf, "advise")) {
- spin_lock(&huge_shmem_orders_lock);
- clear_bit(order, &huge_shmem_orders_always);
- clear_bit(order, &huge_shmem_orders_inherit);
- clear_bit(order, &huge_shmem_orders_within_size);
- set_bit(order, &huge_shmem_orders_madvise);
- spin_unlock(&huge_shmem_orders_lock);
- } else if (sysfs_streq(buf, "never")) {
- spin_lock(&huge_shmem_orders_lock);
- clear_bit(order, &huge_shmem_orders_always);
- clear_bit(order, &huge_shmem_orders_inherit);
- clear_bit(order, &huge_shmem_orders_within_size);
- clear_bit(order, &huge_shmem_orders_madvise);
- spin_unlock(&huge_shmem_orders_lock);
- } else {
- ret = -EINVAL;
+ /* Do not override huge allocation policy with non-PMD sized mTHP */
+ if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
+ shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
+ return -EINVAL;
+
+ spin_lock(&huge_shmem_orders_lock);
+ for (m = 0; m < ARRAY_SIZE(huge_shmem_orders_by_mode); m++) {
+ if (m == mode)
+ set_bit(order, huge_shmem_orders_by_mode[m]);
+ else
+ clear_bit(order, huge_shmem_orders_by_mode[m]);
}
+ spin_unlock(&huge_shmem_orders_lock);
- if (ret > 0) {
- int err = start_stop_khugepaged();
+ err = start_stop_khugepaged();
+ if (err)
+ ret = err;
- if (err)
- ret = err;
- }
return ret;
}
--
2.25.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* [PATCH 2/2] mm: shmem: refactor thpsize_shmem_enabled_show() with helper arrays
2026-05-12 12:05 [PATCH 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() ranxiaokai627
@ 2026-05-12 12:05 ` ranxiaokai627
0 siblings, 0 replies; 2+ messages in thread
From: ranxiaokai627 @ 2026-05-12 12:05 UTC (permalink / raw)
To: hughd, baolin.wang, akpm
Cc: linux-mm, linux-kernel, ran.xiaokai, ranxiaokai627
From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
Replace the hardcoded if/else chain of test_bit() calls and string
literals in thpsize_shmem_enabled_show() with a loop over
huge_shmem_orders_by_mode[] and huge_shmem_enabled_mode_strings[] arrays.
This makes thpsize_shmem_enabled_show() consistent with
thpsize_shmem_enabled_store() and eliminates duplicated mode name strings.
Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
---
mm/shmem.c | 36 +++++++++++++++++++++++-------------
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0cc7872cc576..93fb0a5dca5c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5553,20 +5553,30 @@ static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int order = to_thpsize(kobj)->order;
- const char *output;
-
- if (test_bit(order, &huge_shmem_orders_always))
- output = "[always] inherit within_size advise never";
- else if (test_bit(order, &huge_shmem_orders_inherit))
- output = "always [inherit] within_size advise never";
- else if (test_bit(order, &huge_shmem_orders_within_size))
- output = "always inherit [within_size] advise never";
- else if (test_bit(order, &huge_shmem_orders_madvise))
- output = "always inherit within_size [advise] never";
- else
- output = "always inherit within_size advise [never]";
+ int active = HUGE_SHMEM_ENABLED_NEVER;
+ int len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(huge_shmem_orders_by_mode); i++) {
+ if (test_bit(order, huge_shmem_orders_by_mode[i])) {
+ active = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(huge_shmem_enabled_mode_strings); i++) {
+ if (i == active)
+ len += sysfs_emit_at(buf, len, "[%s] ",
+ huge_shmem_enabled_mode_strings[i]);
+ else
+ len += sysfs_emit_at(buf, len, "%s ",
+ huge_shmem_enabled_mode_strings[i]);
+ }
+
+ /* Replace trailing space with newline */
+ buf[len - 1] = '\n';
- return sysfs_emit(buf, "%s\n", output);
+ return len;
}
static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
--
2.25.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
end of thread, other threads:[~2026-05-12 12:07 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-12 12:05 [PATCH 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() ranxiaokai627
2026-05-12 12:05 ` [PATCH 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox