Linux-mm Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string()
@ 2026-05-13  9:45 ranxiaokai627
  2026-05-13  9:45 ` [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: ranxiaokai627 @ 2026-05-13  9:45 UTC (permalink / raw)
  To: hughd, baolin.wang, akpm
  Cc: leitao, ljs, linux-mm, linux-kernel, ran.xiaokai, ranxiaokai627

From: Ran Xiaokai <ran.xiaokai@zte.com.cn>

Inspired by commit 82d9ff648c6c ("mm: huge_memory: refactor
anon_enabled_store() with set_anon_enabled_mode()"), refactor
thpsize_shmem_enabled_store() using sysfs_match_string().
This eliminates the duplicated spin_lock/unlock() and set/clear_bit()
calls across all branches, reducing code duplication.

Tested with selftests ./run_kselftest.sh -t mm:ksft_thp.sh,
all test cases passed.

Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
---
 mm/shmem.c | 94 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 51 insertions(+), 43 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 3b5dc21b323c..60cb10854f11 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5526,6 +5526,29 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
 static DEFINE_SPINLOCK(huge_shmem_orders_lock);
 
+enum huge_shmem_enabled_mode {
+	HUGE_SHMEM_ENABLED_ALWAYS = 0,
+	HUGE_SHMEM_ENABLED_INHERIT,
+	HUGE_SHMEM_ENABLED_WITHIN_SIZE,
+	HUGE_SHMEM_ENABLED_ADVISE,
+	HUGE_SHMEM_ENABLED_NEVER,
+};
+
+static const char * const huge_shmem_enabled_mode_strings[] = {
+	[HUGE_SHMEM_ENABLED_ALWAYS]      = "always",
+	[HUGE_SHMEM_ENABLED_INHERIT]     = "inherit",
+	[HUGE_SHMEM_ENABLED_WITHIN_SIZE] = "within_size",
+	[HUGE_SHMEM_ENABLED_ADVISE]      = "advise",
+	[HUGE_SHMEM_ENABLED_NEVER]       = "never",
+};
+
+static unsigned long * const huge_shmem_orders_by_mode[] = {
+	[HUGE_SHMEM_ENABLED_ALWAYS]      = &huge_shmem_orders_always,
+	[HUGE_SHMEM_ENABLED_INHERIT]     = &huge_shmem_orders_inherit,
+	[HUGE_SHMEM_ENABLED_WITHIN_SIZE] = &huge_shmem_orders_within_size,
+	[HUGE_SHMEM_ENABLED_ADVISE]      = &huge_shmem_orders_madvise,
+};
+
 static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
 					  struct kobj_attribute *attr, char *buf)
 {
@@ -5551,57 +5574,42 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
 					   const char *buf, size_t count)
 {
 	int order = to_thpsize(kobj)->order;
+	int mode, m;
 	ssize_t ret = count;
+	int err;
+	bool changed = false;
 
-	if (sysfs_streq(buf, "always")) {
-		spin_lock(&huge_shmem_orders_lock);
-		clear_bit(order, &huge_shmem_orders_inherit);
-		clear_bit(order, &huge_shmem_orders_madvise);
-		clear_bit(order, &huge_shmem_orders_within_size);
-		set_bit(order, &huge_shmem_orders_always);
-		spin_unlock(&huge_shmem_orders_lock);
-	} else if (sysfs_streq(buf, "inherit")) {
-		/* Do not override huge allocation policy with non-PMD sized mTHP */
-		if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
-			return -EINVAL;
+	mode = sysfs_match_string(huge_shmem_enabled_mode_strings, buf);
+	if (mode < 0)
+		return -EINVAL;
 
-		spin_lock(&huge_shmem_orders_lock);
-		clear_bit(order, &huge_shmem_orders_always);
-		clear_bit(order, &huge_shmem_orders_madvise);
-		clear_bit(order, &huge_shmem_orders_within_size);
-		set_bit(order, &huge_shmem_orders_inherit);
-		spin_unlock(&huge_shmem_orders_lock);
-	} else if (sysfs_streq(buf, "within_size")) {
-		spin_lock(&huge_shmem_orders_lock);
-		clear_bit(order, &huge_shmem_orders_always);
-		clear_bit(order, &huge_shmem_orders_inherit);
-		clear_bit(order, &huge_shmem_orders_madvise);
-		set_bit(order, &huge_shmem_orders_within_size);
-		spin_unlock(&huge_shmem_orders_lock);
-	} else if (sysfs_streq(buf, "advise")) {
-		spin_lock(&huge_shmem_orders_lock);
-		clear_bit(order, &huge_shmem_orders_always);
-		clear_bit(order, &huge_shmem_orders_inherit);
-		clear_bit(order, &huge_shmem_orders_within_size);
-		set_bit(order, &huge_shmem_orders_madvise);
-		spin_unlock(&huge_shmem_orders_lock);
-	} else if (sysfs_streq(buf, "never")) {
-		spin_lock(&huge_shmem_orders_lock);
-		clear_bit(order, &huge_shmem_orders_always);
-		clear_bit(order, &huge_shmem_orders_inherit);
-		clear_bit(order, &huge_shmem_orders_within_size);
-		clear_bit(order, &huge_shmem_orders_madvise);
-		spin_unlock(&huge_shmem_orders_lock);
-	} else {
-		ret = -EINVAL;
-	}
+	/* Do not override huge allocation policy with non-PMD sized mTHP */
+	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
+		shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
+		return -EINVAL;
 
-	if (ret > 0) {
-		int err = start_stop_khugepaged();
+	spin_lock(&huge_shmem_orders_lock);
+	for (m = 0; m < ARRAY_SIZE(huge_shmem_orders_by_mode); m++) {
+		if (m == mode)
+			changed |= !__test_and_set_bit(order, huge_shmem_orders_by_mode[m]);
+		else
+			changed |= __test_and_clear_bit(order, huge_shmem_orders_by_mode[m]);
+	}
+	spin_unlock(&huge_shmem_orders_lock);
 
+	if (changed) {
+		err = start_stop_khugepaged();
 		if (err)
 			ret = err;
+	} else {
+		/*
+		 * Recalculate watermarks even when the mode hasn't changed
+		 * to preserve the legacy behavior, as this is always called
+		 * inside start_stop_khugepaged().
+		 */
+		set_recommended_min_free_kbytes();
 	}
+
 	return ret;
 }
 
-- 
2.25.1




^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays
  2026-05-13  9:45 [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() ranxiaokai627
@ 2026-05-13  9:45 ` ranxiaokai627
  2026-05-14  2:41   ` Baolin Wang
  2026-05-14  9:08   ` Breno Leitao
  2026-05-14  2:36 ` [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() Baolin Wang
  2026-05-14  8:33 ` Breno Leitao
  2 siblings, 2 replies; 8+ messages in thread
From: ranxiaokai627 @ 2026-05-13  9:45 UTC (permalink / raw)
  To: hughd, baolin.wang, akpm
  Cc: leitao, ljs, linux-mm, linux-kernel, ran.xiaokai, ranxiaokai627

From: Ran Xiaokai <ran.xiaokai@zte.com.cn>

Replace the hardcoded if/else chain of test_bit() calls and string
literals in thpsize_shmem_enabled_show() with a loop over
huge_shmem_orders_by_mode[] and huge_shmem_enabled_mode_strings[] arrays.

This makes thpsize_shmem_enabled_show() consistent with
thpsize_shmem_enabled_store() and eliminates duplicated mode name strings.

Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
---
 mm/shmem.c | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 60cb10854f11..086762e6de71 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5553,20 +5553,30 @@ static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
 					  struct kobj_attribute *attr, char *buf)
 {
 	int order = to_thpsize(kobj)->order;
-	const char *output;
-
-	if (test_bit(order, &huge_shmem_orders_always))
-		output = "[always] inherit within_size advise never";
-	else if (test_bit(order, &huge_shmem_orders_inherit))
-		output = "always [inherit] within_size advise never";
-	else if (test_bit(order, &huge_shmem_orders_within_size))
-		output = "always inherit [within_size] advise never";
-	else if (test_bit(order, &huge_shmem_orders_madvise))
-		output = "always inherit within_size [advise] never";
-	else
-		output = "always inherit within_size advise [never]";
+	int active = HUGE_SHMEM_ENABLED_NEVER;
+	int len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(huge_shmem_orders_by_mode); i++) {
+		if (test_bit(order, huge_shmem_orders_by_mode[i])) {
+			active = i;
+			break;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(huge_shmem_enabled_mode_strings); i++) {
+		if (i == active)
+			len += sysfs_emit_at(buf, len, "[%s] ",
+						huge_shmem_enabled_mode_strings[i]);
+		else
+			len += sysfs_emit_at(buf, len, "%s ",
+						huge_shmem_enabled_mode_strings[i]);
+	}
+
+	/* Replace trailing space with newline */
+	buf[len - 1] = '\n';
 
-	return sysfs_emit(buf, "%s\n", output);
+	return len;
 }
 
 static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
-- 
2.25.1




^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string()
  2026-05-13  9:45 [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() ranxiaokai627
  2026-05-13  9:45 ` [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
@ 2026-05-14  2:36 ` Baolin Wang
  2026-05-14 10:10   ` ranxiaokai627
  2026-05-14  8:33 ` Breno Leitao
  2 siblings, 1 reply; 8+ messages in thread
From: Baolin Wang @ 2026-05-14  2:36 UTC (permalink / raw)
  To: ranxiaokai627, hughd, akpm
  Cc: leitao, ljs, linux-mm, linux-kernel, ran.xiaokai

The subject line should be: "mm: shmem: xxx"

On 5/13/26 5:45 PM, ranxiaokai627@163.com wrote:
> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> 
> Inspired by commit 82d9ff648c6c ("mm: huge_memory: refactor
> anon_enabled_store() with set_anon_enabled_mode()"), refactor
> thpsize_shmem_enabled_store() using sysfs_match_string().
> This eliminates the duplicated spin_lock/unlock(), set/clear_bit(),
> calls across all branches, reducing code duplication.
> 
> Tested with selftests ./run_kselftest.sh -t mm:ksft_thp.sh,
> all test cases passed.
> 
> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> ---

You should document your changes under the '---' for the new version, or 
ideally, include a cover letter describing them.

I did some testing and it works well (some nits as below). Feel free to add:
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>

>   mm/shmem.c | 94 +++++++++++++++++++++++++++++-------------------------
>   1 file changed, 51 insertions(+), 43 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 3b5dc21b323c..60cb10854f11 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -5526,6 +5526,29 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
>   struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
>   static DEFINE_SPINLOCK(huge_shmem_orders_lock);
>   
> +enum huge_shmem_enabled_mode {
> +	HUGE_SHMEM_ENABLED_ALWAYS = 0,
> +	HUGE_SHMEM_ENABLED_INHERIT,
> +	HUGE_SHMEM_ENABLED_WITHIN_SIZE,
> +	HUGE_SHMEM_ENABLED_ADVISE,
> +	HUGE_SHMEM_ENABLED_NEVER,
> +};
> +
> +static const char * const huge_shmem_enabled_mode_strings[] = {
> +	[HUGE_SHMEM_ENABLED_ALWAYS]      = "always",
> +	[HUGE_SHMEM_ENABLED_INHERIT]     = "inherit",
> +	[HUGE_SHMEM_ENABLED_WITHIN_SIZE] = "within_size",
> +	[HUGE_SHMEM_ENABLED_ADVISE]      = "advise",
> +	[HUGE_SHMEM_ENABLED_NEVER]       = "never",
> +};
> +
> +static unsigned long * const huge_shmem_orders_by_mode[] = {
> +	[HUGE_SHMEM_ENABLED_ALWAYS]      = &huge_shmem_orders_always,
> +	[HUGE_SHMEM_ENABLED_INHERIT]     = &huge_shmem_orders_inherit,
> +	[HUGE_SHMEM_ENABLED_WITHIN_SIZE] = &huge_shmem_orders_within_size,
> +	[HUGE_SHMEM_ENABLED_ADVISE]      = &huge_shmem_orders_madvise,
> +};
> +
>   static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
>   					  struct kobj_attribute *attr, char *buf)
>   {
> @@ -5551,57 +5574,42 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
>   					   const char *buf, size_t count)
>   {
>   	int order = to_thpsize(kobj)->order;
> +	int mode, m;
>   	ssize_t ret = count;
> +	int err;
> +	bool changed = false;
>   
> -	if (sysfs_streq(buf, "always")) {
> -		spin_lock(&huge_shmem_orders_lock);
> -		clear_bit(order, &huge_shmem_orders_inherit);
> -		clear_bit(order, &huge_shmem_orders_madvise);
> -		clear_bit(order, &huge_shmem_orders_within_size);
> -		set_bit(order, &huge_shmem_orders_always);
> -		spin_unlock(&huge_shmem_orders_lock);
> -	} else if (sysfs_streq(buf, "inherit")) {
> -		/* Do not override huge allocation policy with non-PMD sized mTHP */
> -		if (shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
> -			return -EINVAL;
> +	mode = sysfs_match_string(huge_shmem_enabled_mode_strings, buf);
> +	if (mode < 0)
> +		return -EINVAL;
>   
> -		spin_lock(&huge_shmem_orders_lock);
> -		clear_bit(order, &huge_shmem_orders_always);
> -		clear_bit(order, &huge_shmem_orders_madvise);
> -		clear_bit(order, &huge_shmem_orders_within_size);
> -		set_bit(order, &huge_shmem_orders_inherit);
> -		spin_unlock(&huge_shmem_orders_lock);
> -	} else if (sysfs_streq(buf, "within_size")) {
> -		spin_lock(&huge_shmem_orders_lock);
> -		clear_bit(order, &huge_shmem_orders_always);
> -		clear_bit(order, &huge_shmem_orders_inherit);
> -		clear_bit(order, &huge_shmem_orders_madvise);
> -		set_bit(order, &huge_shmem_orders_within_size);
> -		spin_unlock(&huge_shmem_orders_lock);
> -	} else if (sysfs_streq(buf, "advise")) {
> -		spin_lock(&huge_shmem_orders_lock);
> -		clear_bit(order, &huge_shmem_orders_always);
> -		clear_bit(order, &huge_shmem_orders_inherit);
> -		clear_bit(order, &huge_shmem_orders_within_size);
> -		set_bit(order, &huge_shmem_orders_madvise);
> -		spin_unlock(&huge_shmem_orders_lock);
> -	} else if (sysfs_streq(buf, "never")) {
> -		spin_lock(&huge_shmem_orders_lock);
> -		clear_bit(order, &huge_shmem_orders_always);
> -		clear_bit(order, &huge_shmem_orders_inherit);
> -		clear_bit(order, &huge_shmem_orders_within_size);
> -		clear_bit(order, &huge_shmem_orders_madvise);
> -		spin_unlock(&huge_shmem_orders_lock);
> -	} else {
> -		ret = -EINVAL;
> -	}
> +	/* Do not override huge allocation policy with non-PMD sized mTHP */
> +	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
> +		shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
> +		return -EINVAL;
>   
> -	if (ret > 0) {
> -		int err = start_stop_khugepaged();
> +	spin_lock(&huge_shmem_orders_lock);
> +	for (m = 0; m < ARRAY_SIZE(huge_shmem_orders_by_mode); m++) {
> +		if (m == mode)
> +			changed |= !__test_and_set_bit(order, huge_shmem_orders_by_mode[m]);
> +		else
> +			changed |= __test_and_clear_bit(order, huge_shmem_orders_by_mode[m]);
> +	}
> +	spin_unlock(&huge_shmem_orders_lock);
>   
> +	if (changed) {
> +		err = start_stop_khugepaged();
>   		if (err)
>   			ret = err;

Nit: just return err.

> +	} else {
> +		/*
> +		 * Recalculate watermarks even when the mode hasn't changed
> +		 * to preserve the legacy behavior, as this is always called
> +		 * inside start_stop_khugepaged().
> +		 */
> +		set_recommended_min_free_kbytes();
>   	}
> +
>   	return ret;

Nit: return count, then you can remove the 'ret' variable.


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays
  2026-05-13  9:45 ` [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
@ 2026-05-14  2:41   ` Baolin Wang
  2026-05-14  9:08   ` Breno Leitao
  1 sibling, 0 replies; 8+ messages in thread
From: Baolin Wang @ 2026-05-14  2:41 UTC (permalink / raw)
  To: ranxiaokai627, hughd, akpm
  Cc: leitao, ljs, linux-mm, linux-kernel, ran.xiaokai

Subject line should be: 'mm: shmem: xxx'

On 5/13/26 5:45 PM, ranxiaokai627@163.com wrote:
> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> 
> Replace the hardcoded if/else chain of test_bit() calls and string
> literals in thpsize_shmem_enabled_show() with a loop over
> huge_shmem_orders_by_mode[] and huge_shmem_enabled_mode_strings[] arrays.
> 
> This makes thpsize_shmem_enabled_show() consistent with
> thpsize_shmem_enabled_store() and eliminates duplicated mode name strings.
> 
> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> ---

LGTM. Thanks.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string()
  2026-05-13  9:45 [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() ranxiaokai627
  2026-05-13  9:45 ` [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
  2026-05-14  2:36 ` [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() Baolin Wang
@ 2026-05-14  8:33 ` Breno Leitao
  2026-05-14  9:26   ` ranxiaokai627
  2 siblings, 1 reply; 8+ messages in thread
From: Breno Leitao @ 2026-05-14  8:33 UTC (permalink / raw)
  To: ranxiaokai627
  Cc: hughd, baolin.wang, akpm, ljs, linux-mm, linux-kernel,
	ran.xiaokai

On Wed, May 13, 2026 at 09:45:07AM +0000, ranxiaokai627@163.com wrote:
> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> 
> Inspired by commit 82d9ff648c6c ("mm: huge_memory: refactor
> anon_enabled_store() with set_anon_enabled_mode()"), refactor
> thpsize_shmem_enabled_store() using sysfs_match_string().
> This eliminates the duplicated spin_lock/unlock(), set/clear_bit(),
> calls across all branches, reducing code duplication.
> 
> Tested with selftests ./run_kselftest.sh -t mm:ksft_thp.sh,
> all test cases passed.
> 
> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>

Reviewed-by: Breno Leitao <leitao@debian.org>

> +	/* Do not override huge allocation policy with non-PMD sized mTHP */
> +	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
> +		shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
> +		return -EINVAL;

This indentation seems a bit broken. You probably want something like:

	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
	    shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
		return -EINVAL;


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays
  2026-05-13  9:45 ` [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
  2026-05-14  2:41   ` Baolin Wang
@ 2026-05-14  9:08   ` Breno Leitao
  1 sibling, 0 replies; 8+ messages in thread
From: Breno Leitao @ 2026-05-14  9:08 UTC (permalink / raw)
  To: ranxiaokai627
  Cc: hughd, baolin.wang, akpm, ljs, linux-mm, linux-kernel,
	ran.xiaokai

On Wed, May 13, 2026 at 09:45:08AM +0000, ranxiaokai627@163.com wrote:
> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
> 
> Replace the hardcoded if/else chain of test_bit() calls and string
> literals in thpsize_shmem_enabled_show() with a loop over
> huge_shmem_orders_by_mode[] and huge_shmem_enabled_mode_strings[] arrays.
> 
> This makes thpsize_shmem_enabled_show() consistent with
> thpsize_shmem_enabled_store() and eliminates duplicated mode name strings.
> 
> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>

Reviewed-by: Breno Leitao <leitao@debian.org>


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string()
  2026-05-14  8:33 ` Breno Leitao
@ 2026-05-14  9:26   ` ranxiaokai627
  0 siblings, 0 replies; 8+ messages in thread
From: ranxiaokai627 @ 2026-05-14  9:26 UTC (permalink / raw)
  To: leitao
  Cc: akpm, baolin.wang, hughd, linux-kernel, linux-mm, ljs,
	ran.xiaokai, ranxiaokai627

>On Wed, May 13, 2026 at 09:45:07AM +0000, ranxiaokai627@163.com wrote:
>> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
>> 
>> Inspired by commit 82d9ff648c6c ("mm: huge_memory: refactor
>> anon_enabled_store() with set_anon_enabled_mode()"), refactor
>> thpsize_shmem_enabled_store() using sysfs_match_string().
>> This eliminates the duplicated spin_lock/unlock(), set/clear_bit(),
>> calls across all branches, reducing code duplication.
>> 
>> Tested with selftests ./run_kselftest.sh -t mm:ksft_thp.sh,
>> all test cases passed.
>> 
>> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
>
>Reviewed-by: Breno Leitao <leitao@debian.org>
>
>> +	/* Do not override huge allocation policy with non-PMD sized mTHP */
>> +	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
>> +		shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
>> +		return -EINVAL;
>
>This indentation seems a bit broken. You probably want something like:
>
>	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
>	    shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
>		return -EINVAL;

Thanks for the detailed and thoughtful review! I'll fix this as 
suggested in the next version.



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string()
  2026-05-14  2:36 ` [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() Baolin Wang
@ 2026-05-14 10:10   ` ranxiaokai627
  0 siblings, 0 replies; 8+ messages in thread
From: ranxiaokai627 @ 2026-05-14 10:10 UTC (permalink / raw)
  To: baolin.wang
  Cc: akpm, hughd, leitao, linux-kernel, linux-mm, ljs, ran.xiaokai,
	ranxiaokai627

>The subject line should be: "mm: shmem: xxx"

Yes, of course.
Thanks for your review.

>On 5/13/26 5:45 PM, ranxiaokai627@163.com wrote:
>> From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
>> 
>> Inspired by commit 82d9ff648c6c ("mm: huge_memory: refactor
>> anon_enabled_store() with set_anon_enabled_mode()"), refactor
>> thpsize_shmem_enabled_store() using sysfs_match_string().
>> This eliminates the duplicated spin_lock/unlock(), set/clear_bit(),
>> calls across all branches, reducing code duplication.
>> 
>> Tested with selftests ./run_kselftest.sh -t mm:ksft_thp.sh,
>> all test cases passed.
>> 
>> Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
>> ---
>
>You should document your changes under the '---' for the new version, or 
>ideally, include a cover letter describing them.

Thanks for the reminder!
Will do in the next version.

>I did some togging and works well (some nits as below). Feel free to add:
>Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
>
>>   mm/shmem.c | 94 +++++++++++++++++++++++++++++-------------------------
>>   1 file changed, 51 insertions(+), 43 deletions(-)
>> 
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 3b5dc21b323c..60cb10854f11 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -5526,6 +5526,29 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
>>   struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
>>   static DEFINE_SPINLOCK(huge_shmem_orders_lock);
>>   
>> +enum huge_shmem_enabled_mode {
>> +	HUGE_SHMEM_ENABLED_ALWAYS = 0,
>> +	HUGE_SHMEM_ENABLED_INHERIT,
>> +	HUGE_SHMEM_ENABLED_WITHIN_SIZE,
>> +	HUGE_SHMEM_ENABLED_ADVISE,
>> +	HUGE_SHMEM_ENABLED_NEVER,
>> +};
>> +
>> +static const char * const huge_shmem_enabled_mode_strings[] = {
>> +	[HUGE_SHMEM_ENABLED_ALWAYS]      = "always",
>> +	[HUGE_SHMEM_ENABLED_INHERIT]     = "inherit",
>> +	[HUGE_SHMEM_ENABLED_WITHIN_SIZE] = "within_size",
>> +	[HUGE_SHMEM_ENABLED_ADVISE]      = "advise",
>> +	[HUGE_SHMEM_ENABLED_NEVER]       = "never",
>> +};
>> +
>> +static unsigned long * const huge_shmem_orders_by_mode[] = {
>> +	[HUGE_SHMEM_ENABLED_ALWAYS]      = &huge_shmem_orders_always,
>> +	[HUGE_SHMEM_ENABLED_INHERIT]     = &huge_shmem_orders_inherit,
>> +	[HUGE_SHMEM_ENABLED_WITHIN_SIZE] = &huge_shmem_orders_within_size,
>> +	[HUGE_SHMEM_ENABLED_ADVISE]      = &huge_shmem_orders_madvise,
>> +};
>> +
>>   static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
>>   					  struct kobj_attribute *attr, char *buf)
>>   {
>> @@ -5551,57 +5574,42 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
>>   					   const char *buf, size_t count)
>>   {
>>   	int order = to_thpsize(kobj)->order;
>> +	int mode, m;
>>   	ssize_t ret = count;
>> +	int err;
>> +	bool changed = false;
>> +	/* Do not override huge allocation policy with non-PMD sized mTHP */
>> +	if (mode == HUGE_SHMEM_ENABLED_INHERIT &&
>> +		shmem_huge == SHMEM_HUGE_FORCE && !is_pmd_order(order))
>> +		return -EINVAL;
>>   
>> -	if (ret > 0) {
>> -		int err = start_stop_khugepaged();
>> +	spin_lock(&huge_shmem_orders_lock);
>> +	for (m = 0; m < ARRAY_SIZE(huge_shmem_orders_by_mode); m++) {
>> +		if (m == mode)
>> +			changed |= !__test_and_set_bit(order, huge_shmem_orders_by_mode[m]);
>> +		else
>> +			changed |= __test_and_clear_bit(order, huge_shmem_orders_by_mode[m]);
>> +	}
>> +	spin_unlock(&huge_shmem_orders_lock);
>>   
>> +	if (changed) {
>> +		err = start_stop_khugepaged();
>>   		if (err)
>>   			ret = err;
>
>Nit: just return err.
>
>> +	} else {
>> +		/*
>> +		 * Recalculate watermarks even when the mode hasn't changed
>> +		 * to preserve the legacy behavior, as this is always called
>> +		 * inside start_stop_khugepaged().
>> +		 */
>> +		set_recommended_min_free_kbytes();
>>   	}
>> +
>>   	return ret;
>
>Nit: return count, then you can remove the 'ret' variable.

Agreed.
I'll wait for more feedback and send a new version.



^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2026-05-14 10:10 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-05-13  9:45 [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() ranxiaokai627
2026-05-13  9:45 ` [PATCH v2 2/2] mm: huge_memory: refactor thpsize_shmem_enabled_show() with helper arrays ranxiaokai627
2026-05-14  2:41   ` Baolin Wang
2026-05-14  9:08   ` Breno Leitao
2026-05-14  2:36 ` [PATCH v2 1/2] mm: huge_memory: refactor thpsize_shmem_enabled_store() with sysfs_match_string() Baolin Wang
2026-05-14 10:10   ` ranxiaokai627
2026-05-14  8:33 ` Breno Leitao
2026-05-14  9:26   ` ranxiaokai627

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox