public inbox for linux-mm@kvack.org
 help / color / mirror / Atom feed
From: Sourav Panda <souravpanda@google.com>
To: akpm@linux-foundation.org, linux-mm@kvack.org,
	 linux-kernel@vger.kernel.org
Cc: lsf-pc@lists.linux-foundation.org, songmuchun@bytedance.com,
	 osalvador@suse.de, mike.kravetz@oracle.com,
	mathieu.desnoyers@efficios.com,  willy@infradead.org,
	david@redhat.com, pasha.tatashin@soleen.com,
	 rientjes@google.com, weixugc@google.com, gthelen@google.com,
	 souravpanda@google.com, surenb@google.com
Subject: [LSF/MM/BPF TOPIC][RFC PATCH 2/2] mm/hugetlb: skip hugetlb shrinking for proactive reclaim
Date: Wed, 18 Mar 2026 23:41:26 +0000	[thread overview]
Message-ID: <20260318234126.3216529-3-souravpanda@google.com> (raw)
In-Reply-To: <20260318234126.3216529-1-souravpanda@google.com>

Scan control can indicate whether we are in proactive reclaim mode.

Pass that flag through to shrink control, and skip hugetlb shrinking
of frozen memory when it is set.

Signed-off-by: Sourav Panda <souravpanda@google.com>
---
 include/linux/shrinker.h |  1 +
 mm/hugetlb.c             |  6 ++++++
 mm/internal.h            |  2 +-
 mm/shrinker.c            | 10 ++++++----
 mm/vmscan.c              |  6 +++---
 5 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 5374c251ee9e..973d5fd68803 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -52,6 +52,7 @@ struct shrink_control {
 	unsigned long nr_scanned;
 
 	s8 priority;
+	bool proactive;
 
 	/* current memcg being shrunk (for memcg aware shrinkers) */
 	struct mem_cgroup *memcg;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d4953ff1dda1..a70aed7c8665 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4145,6 +4145,9 @@ static unsigned long hugepage_shrinker_count(struct shrinker *s,
 	if (sc->priority >= DEF_PRIORITY - 6)
 		return 0;
 
+	if (sc->proactive)
+		return 0;
+
 	if (!gigantic_page_runtime_supported())
 		return 0;
 
@@ -4193,6 +4196,9 @@ static unsigned long hugepage_shrinker_scan(struct shrinker *s,
 	if (sc->nr_to_scan == 0)
 		return SHRINK_STOP;
 
+	if (sc->proactive)
+		return SHRINK_STOP;
+
 	if (!gigantic_page_runtime_supported())
 		return SHRINK_STOP;
 
diff --git a/mm/internal.h b/mm/internal.h
index cb0af847d7d9..cccb68d723d4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1660,7 +1660,7 @@ void __meminit __init_page_from_nid(unsigned long pfn, int nid);
 
 /* shrinker related functions */
 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
-			  int priority);
+			  int priority, bool proactive);
 
 int shmem_add_to_page_cache(struct folio *folio,
 			    struct address_space *mapping,
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 8a7a05182465..21b8f0b9d092 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -467,7 +467,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
 #ifdef CONFIG_MEMCG
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
-			struct mem_cgroup *memcg, int priority)
+			struct mem_cgroup *memcg, int priority, bool proactive)
 {
 	struct shrinker_info *info;
 	unsigned long ret, freed = 0;
@@ -530,6 +530,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 				.nid = nid,
 				.memcg = memcg,
 				.priority = priority,
+				.proactive = proactive,
 			};
 			struct shrinker *shrinker;
 			int shrinker_id = calc_shrinker_id(index, offset);
@@ -586,7 +587,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 }
 #else /* !CONFIG_MEMCG */
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
-			struct mem_cgroup *memcg, int priority)
+			struct mem_cgroup *memcg, int priority, bool proactive)
 {
 	return 0;
 }
@@ -613,7 +614,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
  * Returns the number of reclaimed slab objects.
  */
 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
-			  int priority)
+			  int priority, bool proactive)
 {
 	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;
@@ -626,7 +627,7 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
 	 * oom.
 	 */
 	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
-		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
+		return shrink_slab_memcg(gfp_mask, nid, memcg, priority, proactive);
 
 	/*
 	 * lockless algorithm of global shrink.
@@ -656,6 +657,7 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
 			.nid = nid,
 			.memcg = memcg,
 			.priority = priority,
+			.proactive = proactive,
 		};
 
 		if (!shrinker_try_get(shrinker))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fc9373e8251..39151d1edeff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -432,7 +432,7 @@ static unsigned long drop_slab_node(int nid)
 
 	memcg = mem_cgroup_iter(NULL, NULL, NULL);
 	do {
-		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
+		freed += shrink_slab(GFP_KERNEL, nid, memcg, 0, false);
 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 
 	return freed;
@@ -4925,7 +4925,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
 
 	success = try_to_shrink_lruvec(lruvec, sc);
 
-	shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
+	shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority, sc->proactive);
 
 	if (!sc->proactive)
 		vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
@@ -6020,7 +6020,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 		shrink_lruvec(lruvec, sc);
 
 		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
-			    sc->priority);
+			    sc->priority, sc->proactive);
 
 		/* Record the group's reclaim efficiency */
 		if (!sc->proactive)
-- 
2.53.0.983.g0bb29b3bc5-goog



      parent reply	other threads:[~2026-03-18 23:41 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-18 23:41 [LSF/MM/BPF TOPIC][RFC PATCH 0/2] Hugetlb Fungibility for page metadata savings and network performance Sourav Panda
2026-03-18 23:41 ` [LSF/MM/BPF TOPIC][RFC PATCH 1/2] mm: add hugepage shrinker for frozen memory Sourav Panda
2026-03-18 23:41 ` Sourav Panda [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260318234126.3216529-3-souravpanda@google.com \
    --to=souravpanda@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=david@redhat.com \
    --cc=gthelen@google.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=lsf-pc@lists.linux-foundation.org \
    --cc=mathieu.desnoyers@efficios.com \
    --cc=mike.kravetz@oracle.com \
    --cc=osalvador@suse.de \
    --cc=pasha.tatashin@soleen.com \
    --cc=rientjes@google.com \
    --cc=songmuchun@bytedance.com \
    --cc=surenb@google.com \
    --cc=weixugc@google.com \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox