From: Davidlohr Bueso <dave@stgolabs.net>
To: akpm@linux-foundation.org
Cc: mhocko@kernel.org, hannes@cmpxchg.org, roman.gushchin@linux.dev,
shakeel.butt@linux.dev, yosryahmed@google.com,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
dave@stgolabs.net
Subject: [PATCH 3/4] mm/vmscan: make __node_reclaim() more generic
Date: Mon, 23 Jun 2025 11:58:50 -0700
Message-ID: <20250623185851.830632-4-dave@stgolabs.net>
In-Reply-To: <20250623185851.830632-1-dave@stgolabs.net>
As this will be called from non-page-allocator paths for proactive
reclaim, allow callers to pass in the scan_control and the number of
pages to reclaim, and return the number of reclaimed pages instead of
a boolean. No change in semantics: node_reclaim() now compares the
returned count against nr_pages itself.
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
mm/vmscan.c | 48 +++++++++++++++++++++++++-----------------------
1 file changed, 25 insertions(+), 23 deletions(-)
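Note (not part of the patch): a minimal sketch of how a proactive
reclaim caller, such as the per-node interface added in patch 4/4,
could now drive __node_reclaim() with its own scan_control. The helper
name and the source of nr_pages are placeholders; only the
__node_reclaim() signature and the scan_control fields below come from
this series:

static unsigned long proactive_reclaim_node(struct pglist_data *pgdat,
					    unsigned long nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;
	struct scan_control sc = {
		/* reclaim at least a batch worth of pages */
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = current_gfp_context(gfp_mask),
		.priority = NODE_RECLAIM_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.reclaim_idx = gfp_zone(gfp_mask),
	};

	/* returns the number of reclaimed pages, no longer a boolean */
	return __node_reclaim(pgdat, gfp_mask, nr_pages, &sc);
}

The existing allocator path keeps its boolean semantics by comparing
the returned count against nr_pages in node_reclaim().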
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 63ddec550c3b..cdd9cb97fb79 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7618,36 +7618,26 @@ static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
/*
* Try to free up some pages from this node through reclaim.
*/
-static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+static unsigned long __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask,
+ unsigned long nr_pages,
+ struct scan_control *sc)
{
- /* Minimum pages needed in order to stay on node */
- const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
unsigned int noreclaim_flag;
- struct scan_control sc = {
- .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
- .gfp_mask = current_gfp_context(gfp_mask),
- .order = order,
- .priority = NODE_RECLAIM_PRIORITY,
- .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
- .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
- .may_swap = 1,
- .reclaim_idx = gfp_zone(gfp_mask),
- };
unsigned long pflags;
- trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
- sc.gfp_mask);
+ trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, sc->order,
+ sc->gfp_mask);
cond_resched();
psi_memstall_enter(&pflags);
delayacct_freepages_start();
- fs_reclaim_acquire(sc.gfp_mask);
+ fs_reclaim_acquire(sc->gfp_mask);
/*
* We need to be able to allocate from the reserves for RECLAIM_UNMAP
*/
noreclaim_flag = memalloc_noreclaim_save();
- set_task_reclaim_state(p, &sc.reclaim_state);
+ set_task_reclaim_state(p, &sc->reclaim_state);
if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
@@ -7656,24 +7646,36 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* priorities until we have enough memory freed.
*/
do {
- shrink_node(pgdat, &sc);
- } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
+ shrink_node(pgdat, sc);
+ } while (sc->nr_reclaimed < nr_pages && --sc->priority >= 0);
}
set_task_reclaim_state(p, NULL);
memalloc_noreclaim_restore(noreclaim_flag);
- fs_reclaim_release(sc.gfp_mask);
+ fs_reclaim_release(sc->gfp_mask);
delayacct_freepages_end();
psi_memstall_leave(&pflags);
- trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
+ trace_mm_vmscan_node_reclaim_end(sc->nr_reclaimed);
- return sc.nr_reclaimed >= nr_pages;
+ return sc->nr_reclaimed;
}
int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
{
int ret;
+ /* Minimum pages needed in order to stay on node */
+ const unsigned long nr_pages = 1 << order;
+ struct scan_control sc = {
+ .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
+ .gfp_mask = current_gfp_context(gfp_mask),
+ .order = order,
+ .priority = NODE_RECLAIM_PRIORITY,
+ .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
+ .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
+ .may_swap = 1,
+ .reclaim_idx = gfp_zone(gfp_mask),
+ };
/*
* Node reclaim reclaims unmapped file backed pages and
@@ -7708,7 +7710,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
return NODE_RECLAIM_NOSCAN;
- ret = __node_reclaim(pgdat, gfp_mask, order);
+ ret = __node_reclaim(pgdat, gfp_mask, nr_pages, &sc) >= nr_pages;
clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
if (ret)
--
2.39.5