* [PATCH v2 01/12] mm/mglru: consolidate common code for retrieving evictable size
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation Kairui Song via B4 Relay
` (10 subsequent siblings)
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
Merge commonly used code for counting evictable folios in a lruvec.
No behavior change.
Return unsigned long instead of long as suggested [ Axel Rasmussen ]
Acked-by: Yuanchu Xie <yuanchu@google.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Reviewed-by: Chen Ridong <chenridong@huaweicloud.com>
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 36 ++++++++++++++----------------------
1 file changed, 14 insertions(+), 22 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a8c8fcccbfc..adc07501a137 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4084,27 +4084,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
}
-static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
+static unsigned long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
{
int gen, type, zone;
- unsigned long total = 0;
- int swappiness = get_swappiness(lruvec, sc);
+ unsigned long seq, total = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
DEFINE_MIN_SEQ(lruvec);
for_each_evictable_type(type, swappiness) {
- unsigned long seq;
-
for (seq = min_seq[type]; seq <= max_seq; seq++) {
gen = lru_gen_from_seq(seq);
-
for (zone = 0; zone < MAX_NR_ZONES; zone++)
total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
}
}
+ return total;
+}
+
+static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
+{
+ unsigned long total;
+ int swappiness = get_swappiness(lruvec, sc);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+
+ total = lruvec_evictable_size(lruvec, swappiness);
+
/* whether the size is big enough to be helpful */
return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
}
@@ -4909,9 +4915,6 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
int swappiness, unsigned long *nr_to_scan)
{
- int gen, type, zone;
- unsigned long size = 0;
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
DEFINE_MIN_SEQ(lruvec);
*nr_to_scan = 0;
@@ -4919,18 +4922,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
return true;
- for_each_evictable_type(type, swappiness) {
- unsigned long seq;
-
- for (seq = min_seq[type]; seq <= max_seq; seq++) {
- gen = lru_gen_from_seq(seq);
-
- for (zone = 0; zone < MAX_NR_ZONES; zone++)
- size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
- }
- }
-
- *nr_to_scan = size;
+ *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
/* better to run aging even though eviction is still possible */
return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
}
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 01/12] mm/mglru: consolidate common code for retrieving evictable size Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-30 1:57 ` Chen Ridong
2026-03-30 7:59 ` Baolin Wang
2026-03-28 19:52 ` [PATCH v2 03/12] mm/mglru: relocate the LRU scan batch limit to callers Kairui Song via B4 Relay
` (9 subsequent siblings)
11 siblings, 2 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
The current variable name isn't helpful. Make the variable names more
meaningful.
Only naming change, no behavior change.
Suggested-by: Barry Song <baohua@kernel.org>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index adc07501a137..f336f89a2de6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4934,7 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
*/
static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
{
- bool success;
+ bool need_aging;
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
@@ -4942,7 +4942,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
return -1;
- success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
+ need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
/* try to scrape all its memory if this memcg was deleted */
if (nr_to_scan && !mem_cgroup_online(memcg))
@@ -4951,7 +4951,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
/* try to get away with not aging at the default priority */
- if (!success || sc->priority == DEF_PRIORITY)
+ if (!need_aging || sc->priority == DEF_PRIORITY)
return nr_to_scan >> sc->priority;
/* stop scanning this lruvec as it's low on cold folios */
@@ -5040,7 +5040,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
{
- bool success;
+ bool need_rotate;
unsigned long scanned = sc->nr_scanned;
unsigned long reclaimed = sc->nr_reclaimed;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -5058,7 +5058,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
memcg_memory_event(memcg, MEMCG_LOW);
}
- success = try_to_shrink_lruvec(lruvec, sc);
+ need_rotate = try_to_shrink_lruvec(lruvec, sc);
shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
@@ -5068,10 +5068,10 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
flush_reclaim_state(sc);
- if (success && mem_cgroup_online(memcg))
+ if (need_rotate && mem_cgroup_online(memcg))
return MEMCG_LRU_YOUNG;
- if (!success && lruvec_is_sizable(lruvec, sc))
+ if (!need_rotate && lruvec_is_sizable(lruvec, sc))
return 0;
/* one retry if offlined or too small */
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation
2026-03-28 19:52 ` [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation Kairui Song via B4 Relay
@ 2026-03-30 1:57 ` Chen Ridong
2026-03-30 7:59 ` Baolin Wang
1 sibling, 0 replies; 19+ messages in thread
From: Chen Ridong @ 2026-03-30 1:57 UTC (permalink / raw)
To: kasong, linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang, Kalesh Singh,
Suren Baghdasaryan, Chris Li, Vernon Yang, linux-kernel, Qi Zheng,
Baolin Wang
On 2026/3/29 3:52, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> The current variable name isn't helpful. Make the variable names more
> meaningful.
>
> Only naming change, no behavior change.
>
> Suggested-by: Barry Song <baohua@kernel.org>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/vmscan.c | 14 +++++++-------
> 1 file changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index adc07501a137..f336f89a2de6 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4934,7 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> */
> static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> {
> - bool success;
> + bool need_aging;
> unsigned long nr_to_scan;
> struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> DEFINE_MAX_SEQ(lruvec);
> @@ -4942,7 +4942,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> return -1;
>
> - success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> + need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
>
> /* try to scrape all its memory if this memcg was deleted */
> if (nr_to_scan && !mem_cgroup_online(memcg))
> @@ -4951,7 +4951,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
>
> /* try to get away with not aging at the default priority */
> - if (!success || sc->priority == DEF_PRIORITY)
> + if (!need_aging || sc->priority == DEF_PRIORITY)
> return nr_to_scan >> sc->priority;
>
> /* stop scanning this lruvec as it's low on cold folios */
> @@ -5040,7 +5040,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
>
> static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
> {
> - bool success;
> + bool need_rotate;
> unsigned long scanned = sc->nr_scanned;
> unsigned long reclaimed = sc->nr_reclaimed;
> struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> @@ -5058,7 +5058,7 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
> memcg_memory_event(memcg, MEMCG_LOW);
> }
>
> - success = try_to_shrink_lruvec(lruvec, sc);
> + need_rotate = try_to_shrink_lruvec(lruvec, sc);
>
> shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
>
> @@ -5068,10 +5068,10 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
>
> flush_reclaim_state(sc);
>
> - if (success && mem_cgroup_online(memcg))
> + if (need_rotate && mem_cgroup_online(memcg))
> return MEMCG_LRU_YOUNG;
>
> - if (!success && lruvec_is_sizable(lruvec, sc))
> + if (!need_rotate && lruvec_is_sizable(lruvec, sc))
> return 0;
>
> /* one retry if offlined or too small */
>
Reviewed-by: Chen Ridong <chenridong@huaweicloud.com>
--
Best regards,
Ridong
^ permalink raw reply [flat|nested] 19+ messages in thread* Re: [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation
2026-03-28 19:52 ` [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation Kairui Song via B4 Relay
2026-03-30 1:57 ` Chen Ridong
@ 2026-03-30 7:59 ` Baolin Wang
1 sibling, 0 replies; 19+ messages in thread
From: Baolin Wang @ 2026-03-30 7:59 UTC (permalink / raw)
To: kasong, linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng
On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> The current variable name isn't helpful. Make the variable names more
> meaningful.
>
> Only naming change, no behavior change.
>
> Suggested-by: Barry Song <baohua@kernel.org>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
LGTM.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v2 03/12] mm/mglru: relocate the LRU scan batch limit to callers
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 01/12] mm/mglru: consolidate common code for retrieving evitable size Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 02/12] mm/mglru: rename variables related to aging and rotation Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-30 8:14 ` Baolin Wang
2026-03-28 19:52 ` [PATCH v2 04/12] mm/mglru: restructure the reclaim loop Kairui Song via B4 Relay
` (8 subsequent siblings)
11 siblings, 1 reply; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
Same as active / inactive LRU, MGLRU isolates and scans folios in
batches. The batch split is done hidden deep in the helper, which
makes the code harder to follow. The helper's arguments are also
confusing since callers usually request more folios than the batch
size, so the helper almost never processes the full requested amount.
Move the batch splitting into the top loop to make it cleaner, there
should be no behavior change.
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f336f89a2de6..963362523782 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4695,10 +4695,10 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
int scanned = 0;
int isolated = 0;
int skipped = 0;
- int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
- int remaining = scan_batch;
+ unsigned long remaining = nr_to_scan;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ VM_WARN_ON_ONCE(nr_to_scan > MAX_LRU_BATCH);
VM_WARN_ON_ONCE(!list_empty(list));
if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
@@ -4751,7 +4751,7 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
mod_lruvec_state(lruvec, item, isolated);
mod_lruvec_state(lruvec, PGREFILL, sorted);
mod_lruvec_state(lruvec, PGSCAN_ANON + type, isolated);
- trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
+ trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
scanned, skipped, isolated,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
if (type == LRU_GEN_FILE)
@@ -4987,7 +4987,7 @@ static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
- long nr_to_scan;
+ long nr_batch, nr_to_scan;
unsigned long scanned = 0;
int swappiness = get_swappiness(lruvec, sc);
@@ -4998,7 +4998,8 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
if (nr_to_scan <= 0)
break;
- delta = evict_folios(nr_to_scan, lruvec, sc, swappiness);
+ nr_batch = min(nr_to_scan, MAX_LRU_BATCH);
+ delta = evict_folios(nr_batch, lruvec, sc, swappiness);
if (!delta)
break;
@@ -5623,6 +5624,7 @@ static int run_aging(struct lruvec *lruvec, unsigned long seq,
static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
int swappiness, unsigned long nr_to_reclaim)
{
+ int nr_batch;
DEFINE_MAX_SEQ(lruvec);
if (seq + MIN_NR_GENS > max_seq)
@@ -5639,8 +5641,8 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
if (sc->nr_reclaimed >= nr_to_reclaim)
return 0;
- if (!evict_folios(nr_to_reclaim - sc->nr_reclaimed, lruvec, sc,
- swappiness))
+ nr_batch = min(nr_to_reclaim - sc->nr_reclaimed, MAX_LRU_BATCH);
+ if (!evict_folios(nr_batch, lruvec, sc, swappiness))
return 0;
cond_resched();
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH v2 03/12] mm/mglru: relocate the LRU scan batch limit to callers
2026-03-28 19:52 ` [PATCH v2 03/12] mm/mglru: relocate the LRU scan batch limit to callers Kairui Song via B4 Relay
@ 2026-03-30 8:14 ` Baolin Wang
0 siblings, 0 replies; 19+ messages in thread
From: Baolin Wang @ 2026-03-30 8:14 UTC (permalink / raw)
To: kasong, linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng
On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Same as active / inactive LRU, MGLRU isolates and scans folios in
> batches. The batch split is done hidden deep in the helper, which
> makes the code harder to follow. The helper's arguments are also
> confusing since callers usually request more folios than the batch
> size, so the helper almost never processes the full requested amount.
>
> Move the batch splitting into the top loop to make it cleaner, there
> should be no behavior change.
>
> Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
Some nits as follows, otherwise LGTM.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
> mm/vmscan.c | 16 +++++++++-------
> 1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index f336f89a2de6..963362523782 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4695,10 +4695,10 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> int scanned = 0;
> int isolated = 0;
> int skipped = 0;
> - int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
> - int remaining = scan_batch;
> + unsigned long remaining = nr_to_scan;
> struct lru_gen_folio *lrugen = &lruvec->lrugen;
>
> + VM_WARN_ON_ONCE(nr_to_scan > MAX_LRU_BATCH);
> VM_WARN_ON_ONCE(!list_empty(list));
>
> if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
> @@ -4751,7 +4751,7 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> mod_lruvec_state(lruvec, item, isolated);
> mod_lruvec_state(lruvec, PGREFILL, sorted);
> mod_lruvec_state(lruvec, PGSCAN_ANON + type, isolated);
> - trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
> + trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
> scanned, skipped, isolated,
> type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
> if (type == LRU_GEN_FILE)
> @@ -4987,7 +4987,7 @@ static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
>
> static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
> {
> - long nr_to_scan;
> + long nr_batch, nr_to_scan;
Nit: Since evict_folios() expects an unsgined long, why not define
'unsigned long nr_batch'?
> unsigned long scanned = 0;
> int swappiness = get_swappiness(lruvec, sc);
>
> @@ -4998,7 +4998,8 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
> if (nr_to_scan <= 0)
> break;
>
> - delta = evict_folios(nr_to_scan, lruvec, sc, swappiness);
> + nr_batch = min(nr_to_scan, MAX_LRU_BATCH);
> + delta = evict_folios(nr_batch, lruvec, sc, swappiness);
> if (!delta)
> break;
>
> @@ -5623,6 +5624,7 @@ static int run_aging(struct lruvec *lruvec, unsigned long seq,
> static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
> int swappiness, unsigned long nr_to_reclaim)
> {
> + int nr_batch;
Nit: since 'nr_to_reclaim' is unsigned long, better to use unsigned long
for 'nr_batch'.
> DEFINE_MAX_SEQ(lruvec);
>
> if (seq + MIN_NR_GENS > max_seq)
> @@ -5639,8 +5641,8 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
> if (sc->nr_reclaimed >= nr_to_reclaim)
> return 0;
>
> - if (!evict_folios(nr_to_reclaim - sc->nr_reclaimed, lruvec, sc,
> - swappiness))
> + nr_batch = min(nr_to_reclaim - sc->nr_reclaimed, MAX_LRU_BATCH);
> + if (!evict_folios(nr_batch, lruvec, sc, swappiness))
> return 0;
>
> cond_resched();
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v2 04/12] mm/mglru: restructure the reclaim loop
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (2 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 03/12] mm/mglru: relocate the LRU scan batch limit to callers Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-29 6:47 ` Kairui Song
2026-03-28 19:52 ` [PATCH v2 05/12] mm/mglru: scan and count the exact number of folios Kairui Song via B4 Relay
` (7 subsequent siblings)
11 siblings, 1 reply; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
The current loop will calculate the scan number on each iteration. The
number of folios to scan is based on the LRU length, with some unclear
behaviors, e.g., it only shifts the scan number by reclaim priority at the
default priority, and it couples the number calculation with aging and
rotation.
Adjust, simplify it, and decouple aging and rotation. Just calculate the
scan number for once at the beginning of the reclaim, always respect the
reclaim priority, and make the aging and rotation more explicit.
This slightly changes how offline memcg aging works: previously, offline
memcg wouldn't be aged unless it didn't have any evictable folios. Now,
we might age it if it has only 3 generations and the reclaim priority is
less than DEF_PRIORITY, which should be fine. On one hand, offline memcg
might still hold long-term folios, and in fact, a long-existing offline
memcg must be pinned by some long-term folios like shmem. These folios
might be used by other memcg, so aging them as ordinary memcg doesn't
seem wrong. And besides, aging enables further reclaim of an offlined
memcg, which will certainly happen if we keep shrinking it. And offline
memcg might soon be no longer an issue once reparenting is all ready.
Overall, the memcg LRU rotation, as described in mmzone.h,
remains the same.
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 70 +++++++++++++++++++++++++++++++------------------------------
1 file changed, 36 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 963362523782..ab81ffdb241a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4913,49 +4913,40 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
}
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
- int swappiness, unsigned long *nr_to_scan)
+ struct scan_control *sc, int swappiness)
{
DEFINE_MIN_SEQ(lruvec);
- *nr_to_scan = 0;
/* have to run aging, since eviction is not possible anymore */
if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
return true;
- *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
+ /* try to get away with not aging at the default priority */
+ if (sc->priority == DEF_PRIORITY)
+ return false;
+
/* better to run aging even though eviction is still possible */
return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
}
-/*
- * For future optimizations:
- * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
- * reclaim.
- */
-static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
+static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
+ struct mem_cgroup *memcg, int swappiness)
{
- bool need_aging;
unsigned long nr_to_scan;
- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- DEFINE_MAX_SEQ(lruvec);
-
- if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
- return -1;
-
- need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
+ nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
/* try to scrape all its memory if this memcg was deleted */
- if (nr_to_scan && !mem_cgroup_online(memcg))
+ if (!mem_cgroup_online(memcg))
return nr_to_scan;
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
- /* try to get away with not aging at the default priority */
- if (!need_aging || sc->priority == DEF_PRIORITY)
- return nr_to_scan >> sc->priority;
-
- /* stop scanning this lruvec as it's low on cold folios */
- return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
+ /*
+ * Always respect scan priority, minimally target
+ * SWAP_CLUSTER_MAX pages to keep reclaim moving forwards.
+ */
+ nr_to_scan >>= sc->priority;
+ return max(nr_to_scan, SWAP_CLUSTER_MAX);
}
static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
@@ -4985,31 +4976,43 @@ static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
return true;
}
+/*
+ * For future optimizations:
+ * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
+ * reclaim.
+ */
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
+ bool need_rotate = false;
long nr_batch, nr_to_scan;
- unsigned long scanned = 0;
int swappiness = get_swappiness(lruvec, sc);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- while (true) {
+ nr_to_scan = get_nr_to_scan(lruvec, sc, memcg, swappiness);
+ while (nr_to_scan > 0) {
int delta;
+ DEFINE_MAX_SEQ(lruvec);
- nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
- if (nr_to_scan <= 0)
+ if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) {
+ need_rotate = true;
break;
+ }
+
+ if (should_run_aging(lruvec, max_seq, sc, swappiness)) {
+ if (try_to_inc_max_seq(lruvec, max_seq, swappiness, false))
+ need_rotate = true;
+ break;
+ }
nr_batch = min(nr_to_scan, MAX_LRU_BATCH);
delta = evict_folios(nr_batch, lruvec, sc, swappiness);
if (!delta)
break;
- scanned += delta;
- if (scanned >= nr_to_scan)
- break;
-
if (should_abort_scan(lruvec, sc))
break;
+ nr_to_scan -= delta;
cond_resched();
}
@@ -5035,8 +5038,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
}
- /* whether this lruvec should be rotated */
- return nr_to_scan < 0;
+ return need_rotate;
}
static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH v2 04/12] mm/mglru: restructure the reclaim loop
2026-03-28 19:52 ` [PATCH v2 04/12] mm/mglru: restructure the reclaim loop Kairui Song via B4 Relay
@ 2026-03-29 6:47 ` Kairui Song
0 siblings, 0 replies; 19+ messages in thread
From: Kairui Song @ 2026-03-29 6:47 UTC (permalink / raw)
To: linux-mm
Cc: Kairui Song, Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang
So sashiko provided some review, I'll post it below:
https://sashiko.dev/#/patchset/20260329-mglru-reclaim-v2-0-b53a3678513c%40tencent.com?patch=12986
> From sashiko:
>
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -4913,49 +4913,40 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> > }
> >
> > static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > - int swappiness, unsigned long *nr_to_scan)
> > + struct scan_control *sc, int swappiness)
> > {
> > DEFINE_MIN_SEQ(lruvec);
> >
> > - *nr_to_scan = 0;
> > /* have to run aging, since eviction is not possible anymore */
> > if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
> > return true;
> >
> > - *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
> > + /* try to get away with not aging at the default priority */
> > + if (sc->priority == DEF_PRIORITY)
> > + return false;
>
> Is this a regression that unconditionally forces aging even at DEF_PRIORITY?
> In the original code, if eviction wasn't possible, it set need_aging to
> true, but the subsequent check against DEF_PRIORITY could still bypass aging
> and allow MGLRU to skip memcgs without cold pages.
> With the new logic, the mandatory aging check precedes the DEF_PRIORITY
> check, returning true immediately. Will this cause try_to_shrink_lruvec()
> to inappropriately force hot pages into older generations on the gentlest
> reclaim pass instead of trying other memcgs first?
Right, this part is a bit confusing indeed. I tried to split it
into two patches, but it seemed it could be messier that way.
This is more of an intentional change. When eviction is not possible
(fewer than MIN_NR_GENS generations), aging is the only way to make
progress. The old code always skips aging at DEF_PRIORITY.
For a single-cgroup setup, this is not helpful in any way; it wastes a
reclaim iteration only to come back at a higher priority and age
then. And it causes over-reclaim of slab.
For a multi-cgroup setup, it may seem reasonable that deferring the aging
and reclaiming other cgroups is a good idea, but we will have to proceed
with aging anyway, potentially when all cgroups have run out of
gens, causing a larger jitter and breaking reclaim balance.
And the escalated priority may also cause over-reclaim.
And right now we still have 3 gens as the buffer.
Testing the multi cgroup setup using JaveScript workload in the
cover letter showed that after this series, multi cgroup reclaim
balance and latency are good, the total throughput seems better.
Testing the reclaim balance of cgroup tree (see below) also
seems better.
I think to avoid the latency of aging, we really need async
aging, not deferred aging. Async aging might be easier to
integrate here now after this series.
I could add some words on that in the commit message.
> From sashiko:
>
> > -static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> > +static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
> > + struct mem_cgroup *memcg, int swappiness)
> > {
> > - bool need_aging;
> > unsigned long nr_to_scan;
> > - struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > - DEFINE_MAX_SEQ(lruvec);
> > -
> > - if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> > - return -1;
> > -
> > - need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> >
> > + nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
> > /* try to scrape all its memory if this memcg was deleted */
> > - if (nr_to_scan && !mem_cgroup_online(memcg))
> > + if (!mem_cgroup_online(memcg))
> > return nr_to_scan;
>
> Will this cause a regression resulting in a permanent memory leak for
> offline memcgs that have zero evictable folios?
> If an offline memcg has all its folios in the unevictable youngest
> generation, lruvec_evictable_size() returns 0. The early return here
> then evaluates to 0.
> When try_to_shrink_lruvec() receives 0, its while (nr_to_scan > 0) loop
> is completely bypassed. Since the aging logic was moved inside that loop,
> the offline memcg is never aged, meaning its folios will remain in the
> unevictable youngest generation indefinitely.
This seems to be a hallucination. lruvec_evictable_size() returns evictable
folios across gens, not excluding the youngest one, since aging
may make them evictable. Perhaps we can add a comment for this;
if it confuses AI, it may confuse humans too.
And aging is not out of the loop; it's just moved to a different
helper.
> From sashiko:
> >
> > nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
> >
> > - /* try to get away with not aging at the default priority */
> > - if (!need_aging || sc->priority == DEF_PRIORITY)
> > - return nr_to_scan >> sc->priority;
> > -
> > - /* stop scanning this lruvec as it's low on cold folios */
> > - return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
> > + /*
> > + * Always respect scan priority, minimally target
> > + * SWAP_CLUSTER_MAX pages to keep reclaim moving forwards.
> > + */
> > + nr_to_scan >>= sc->priority;
> > + return max(nr_to_scan, SWAP_CLUSTER_MAX);
> > }
>
> Does this regression break proportional reclaim and memory protection
> semantics for small memcgs by forcing a minimum scan size?
Actually this change was inspired by sashiko's review for V1:
https://sashiko.dev/#/patchset/20260318-mglru-reclaim-v1-0-2c46f9eb0508%40tencent.com?patch=2909
Without this, for cgroups smaller than 16M, def priority scan will just
do nothing. It's still OK, but to make it more efficient I added a
minimal batch. But thinking about it again, it had better be:
if (!nr_to_scan)
nr_to_scan = min(lruvec_evictable_size, SWAP_CLUSTER_MAX);
Using max() here could get very small cgroups over-reclaimed.
I did test V2 using test_memcg_min suggested by af827e090489:
Before:
Proportional reclaim results:
c[0] actual= 29069312 (27M) ideal= 30408704 (29M) err=4.4%
c[1] actual= 23257088 (22M) ideal= 22020096 (21M) err=5.6%
c[2] actual= 1552384 (1M) (expected ~0)
c[3] actual= 0 (0M) (expected =0)
After:
Proportional reclaim results:
c[0] actual= 31391744 (29M) ideal= 30408704 (29M) err=3.2%
c[1] actual= 21028864 (20M) ideal= 22020096 (21M) err=4.5%
c[2] actual= 1515520 (1M) (expected ~0)
c[3] actual= 0 (0M) (expected =0)
In both cases the result is somewhat unstable; I ran the test
7 times, using the median stable result. After this series it seems
the result is sometimes even better, but that is likely just noise. And
I didn't see a regression.
The 32 folios minimal batch seems already small enough for
typical usage, but min(evictable_size, SWAP_CLUSTER_MAX) is definitely
better. Will send a V3 to update this.
I think none of the benchmarks or tests would be affected by this.
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v2 05/12] mm/mglru: scan and count the exact number of folios
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (3 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 04/12] mm/mglru: restructure the reclaim loop Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 06/12] mm/mglru: use a smaller batch for reclaim Kairui Song via B4 Relay
` (6 subsequent siblings)
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
Make the scan helpers return the exact number of folios being scanned
or isolated. Since the reclaim loop now has a natural scan budget that
controls the scan progress, returning the scan number directly should
make the scan more accurate and easier to follow.
The number of scanned folios for each iteration is always larger
than zero, unless the reclaim must stop for a forced aging, so
there is no more need for any special handling when there is no
progress made:
- `return isolated || !remaining ? scanned : 0` in scan_folios: both
the function and the call now just return the exact scan count,
combined with the scan budget introduced in the previous commit to
avoid livelock or under scan.
- `scanned += try_to_inc_min_seq` in evict_folios: adding a bool as a
scan count was kind of confusing and is no longer needed either, as the
scan number will never be zero even if none of the folios in the oldest
generation are isolated.
- `evictable_min_seq + MIN_NR_GENS > max_seq` guard in evict_folios:
the per-type get_nr_gens == MIN_NR_GENS check in scan_folios
naturally returns 0 when only two gens remain and breaks the loop.
Also move try_to_inc_min_seq before isolate_folios, so that any empty
gens created by external folio freeing are also skipped.
The scan still stops if there are only two gens left, as the scan number
will be zero; this behavior is the same as before. This forced gen protection
may get removed or softened later to improve the reclaim a bit more.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 46 +++++++++++++++++++++++-----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ab81ffdb241a..c5361efa6776 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4686,7 +4686,7 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, int type, int tier,
- struct list_head *list)
+ struct list_head *list, int *isolatedp)
{
int i;
int gen;
@@ -4756,11 +4756,9 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
if (type == LRU_GEN_FILE)
sc->nr.file_taken += isolated;
- /*
- * There might not be eligible folios due to reclaim_idx. Check the
- * remaining to prevent livelock if it's not making progress.
- */
- return isolated || !remaining ? scanned : 0;
+
+ *isolatedp = isolated;
+ return scanned;
}
static int get_tier_idx(struct lruvec *lruvec, int type)
@@ -4804,33 +4802,36 @@ static int get_type_to_scan(struct lruvec *lruvec, int swappiness)
static int isolate_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, int swappiness,
- int *type_scanned, struct list_head *list)
+ struct list_head *list, int *isolated,
+ int *isolate_type, int *isolate_scanned)
{
int i;
+ int scanned = 0;
int type = get_type_to_scan(lruvec, swappiness);
for_each_evictable_type(i, swappiness) {
- int scanned;
+ int type_scan;
int tier = get_tier_idx(lruvec, type);
- *type_scanned = type;
+ type_scan = scan_folios(nr_to_scan, lruvec, sc,
+ type, tier, list, isolated);
- scanned = scan_folios(nr_to_scan, lruvec, sc, type, tier, list);
- if (scanned)
- return scanned;
+ scanned += type_scan;
+ if (*isolated) {
+ *isolate_type = type;
+ *isolate_scanned = type_scan;
+ break;
+ }
type = !type;
}
- return 0;
+ return scanned;
}
static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, int swappiness)
{
- int type;
- int scanned;
- int reclaimed;
LIST_HEAD(list);
LIST_HEAD(clean);
struct folio *folio;
@@ -4838,19 +4839,18 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
enum node_stat_item item;
struct reclaim_stat stat;
struct lru_gen_mm_walk *walk;
+ int scanned, reclaimed;
+ int isolated = 0, type, type_scanned;
bool skip_retry = false;
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
lruvec_lock_irq(lruvec);
- scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness, &type, &list);
-
- scanned += try_to_inc_min_seq(lruvec, swappiness);
+ try_to_inc_min_seq(lruvec, swappiness);
- if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq)
- scanned = 0;
+ scanned = isolate_folios(nr_to_scan, lruvec, sc, swappiness,
+ &list, &isolated, &type, &type_scanned);
lruvec_unlock_irq(lruvec);
@@ -4861,7 +4861,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
sc->nr_reclaimed += reclaimed;
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
- scanned, reclaimed, &stat, sc->priority,
+ type_scanned, reclaimed, &stat, sc->priority,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
list_for_each_entry_safe_reverse(folio, next, &list, lru) {
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 06/12] mm/mglru: use a smaller batch for reclaim
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (4 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 05/12] mm/mglru: scan and count the exact number of folios Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 07/12] mm/mglru: don't abort scan immediately right after aging Kairui Song via B4 Relay
` (5 subsequent siblings)
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
With a fixed number to reclaim calculated at the beginning, making each
following step smaller should reduce the lock contention and avoid
over-aggressive reclaim of folios, as it will abort earlier when the
number of folios to be reclaimed is reached.
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Reviewed-by: Chen Ridong <chenridong@huaweicloud.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c5361efa6776..e3ca38d0c4cd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5004,7 +5004,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
break;
}
- nr_batch = min(nr_to_scan, MAX_LRU_BATCH);
+ nr_batch = min(nr_to_scan, MIN_LRU_BATCH);
delta = evict_folios(nr_batch, lruvec, sc, swappiness);
if (!delta)
break;
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 07/12] mm/mglru: don't abort scan immediately right after aging
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (5 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 06/12] mm/mglru: use a smaller batch for reclaim Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 08/12] mm/mglru: simplify and improve dirty writeback handling Kairui Song via B4 Relay
` (4 subsequent siblings)
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
Right now, if eviction triggers aging, the reclaimer will abort. This is
not the optimal strategy for several reasons.
Aborting the reclaim early wastes a reclaim cycle when under pressure,
and for concurrent reclaim, if the LRU is under aging, all concurrent
reclaimers might fail. And if the age has just finished, new cold folios
exposed by the aging are not reclaimed until the next reclaim iteration.
What's more, the current aging trigger is quite lenient, having 3 gens
with a reclaim priority lower than default will trigger aging, and
block reclaiming from one memcg. This wastes reclaim retry cycles
easily. And in the worst case, if the reclaim is making slower progress
and all following attempts fail due to being blocked by aging, it
triggers unexpected early OOM.
And if a lruvec requires aging, it doesn't mean it's hot. Instead, the
lruvec could be idle for quite a while, and hence it might contain lots
of cold folios to be reclaimed.
While it's helpful to rotate memcg LRU after aging for global reclaim,
as global reclaim fairness is coupled with the rotation in shrink_many,
memcg fairness is instead handled by cgroup iteration in
shrink_node_memcgs. So, for memcg level pressure, this abort is not the
key part for keeping the fairness. And in most cases, there is no need
to age, and fairness must be achieved by upper-level reclaim control.
So instead, just keep the scanning going unless one whole batch of
folios failed to be isolated or enough folios have been scanned, which
is triggered by evict_folios returning 0. And only abort for global
reclaim after one batch, so when there are fewer memcgs, progress is
still made, and the fairness mechanism described above still works fine.
And in most cases, the one more batch attempt for global reclaim might
just be enough to satisfy what the reclaimer needs, hence improving
global reclaim performance by reducing reclaim retry cycles.
Rotation is still there after the reclaim is done, which still follows
the comment in mmzone.h. And fairness still looks good.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e3ca38d0c4cd..8de5c8d5849e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4983,7 +4983,7 @@ static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
*/
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
- bool need_rotate = false;
+ bool need_rotate = false, should_age = false;
long nr_batch, nr_to_scan;
int swappiness = get_swappiness(lruvec, sc);
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -5001,7 +5001,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
if (should_run_aging(lruvec, max_seq, sc, swappiness)) {
if (try_to_inc_max_seq(lruvec, max_seq, swappiness, false))
need_rotate = true;
- break;
+ should_age = true;
}
nr_batch = min(nr_to_scan, MIN_LRU_BATCH);
@@ -5012,6 +5012,10 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
if (should_abort_scan(lruvec, sc))
break;
+ /* Cgroup reclaim fairness not guarded by rotate */
+ if (root_reclaim(sc) && should_age)
+ break;
+
nr_to_scan -= delta;
cond_resched();
}
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 08/12] mm/mglru: simplify and improve dirty writeback handling
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (6 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 07/12] mm/mglru: don't abort scan immediately right after aging Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-29 8:21 ` Kairui Song
2026-03-28 19:52 ` [PATCH v2 09/12] mm/mglru: remove no longer used reclaim argument for folio protection Kairui Song via B4 Relay
` (3 subsequent siblings)
11 siblings, 1 reply; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
The current handling of dirty writeback folios is not working well for
file page heavy workloads: dirty folios are protected and moved to the
next gen upon isolation, instead of getting throttled or reactivated upon
pageout (shrink_folio_list).
This might help to reduce the LRU lock contention slightly, but as a
result, the ping-pong effect of folios between head and tail of last two
gens is serious as the shrinker will run into protected dirty writeback
folios more frequently compared to activation. The dirty flush wakeup
condition is also much more passive compared to active/inactive LRU.
Active / inactive LRU wakes the flusher if one batch of folios passed to
shrink_folio_list is unevictable due to under writeback, but MGLRU
instead has to check this after the whole reclaim loop is done, and then
count the isolation protection number compared to the total reclaim
number.
And we previously saw OOM problems with it, too, which were fixed but
still not perfect [1].
So instead, just drop the special handling for dirty writeback and
re-activate such folios like the active / inactive LRU. Also move the dirty flush
wake up check right after shrink_folio_list. This should improve both
throttling and performance.
Test with YCSB workloadb showed a major performance improvement:
Before this series:
Throughput(ops/sec): 61642.78008938203
AverageLatency(us): 507.11127774145166
pgpgin 158190589
pgpgout 5880616
workingset_refault 7262988
After this commit:
Throughput(ops/sec): 80216.04855744806 (+30.1%, higher is better)
AverageLatency(us): 388.17633477268913 (-23.5%, lower is better)
pgpgin 101871227 (-35.6%, lower is better)
pgpgout 5770028
workingset_refault 3418186 (-52.9%, lower is better)
The refault rate is ~50% lower, and throughput is ~30% higher, which
is a huge gain. We also observed significant performance gain for
other real-world workloads.
We were concerned that the dirty flush could cause more wear for SSD:
that should not be the problem here, since the wakeup condition is when
the dirty folios have been pushed to the tail of LRU, which indicates
that memory pressure is so high that writeback is blocking the workload
already.
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Link: https://lore.kernel.org/linux-mm/20241026115714.1437435-1-jingxiangzeng.cas@gmail.com/ [1]
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 57 ++++++++++++++++-----------------------------------------
1 file changed, 16 insertions(+), 41 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8de5c8d5849e..17b5318fad39 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4583,7 +4583,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
int tier_idx)
{
bool success;
- bool dirty, writeback;
int gen = folio_lru_gen(folio);
int type = folio_is_file_lru(folio);
int zone = folio_zonenum(folio);
@@ -4633,21 +4632,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
return true;
}
- dirty = folio_test_dirty(folio);
- writeback = folio_test_writeback(folio);
- if (type == LRU_GEN_FILE && dirty) {
- sc->nr.file_taken += delta;
- if (!writeback)
- sc->nr.unqueued_dirty += delta;
- }
-
- /* waiting for writeback */
- if (writeback || (type == LRU_GEN_FILE && dirty)) {
- gen = folio_inc_gen(lruvec, folio, true);
- list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
- return true;
- }
-
return false;
}
@@ -4754,8 +4738,6 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
scanned, skipped, isolated,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
- if (type == LRU_GEN_FILE)
- sc->nr.file_taken += isolated;
*isolatedp = isolated;
return scanned;
@@ -4858,12 +4840,27 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
return scanned;
retry:
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
- sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
sc->nr_reclaimed += reclaimed;
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
type_scanned, reclaimed, &stat, sc->priority,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
+ /*
+ * If too many file cache in the coldest generation can't be evicted
+ * due to being dirty, wake up the flusher.
+ */
+ if (stat.nr_unqueued_dirty == isolated) {
+ wakeup_flusher_threads(WB_REASON_VMSCAN);
+
+ /*
+ * For cgroupv1 dirty throttling is achieved by waking up
+ * the kernel flusher here and later waiting on folios
+ * which are in writeback to finish (see shrink_folio_list()).
+ */
+ if (!writeback_throttling_sane(sc))
+ reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+ }
+
list_for_each_entry_safe_reverse(folio, next, &list, lru) {
DEFINE_MIN_SEQ(lruvec);
@@ -5020,28 +5017,6 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
cond_resched();
}
- /*
- * If too many file cache in the coldest generation can't be evicted
- * due to being dirty, wake up the flusher.
- */
- if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken) {
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
-
- wakeup_flusher_threads(WB_REASON_VMSCAN);
-
- /*
- * For cgroupv1 dirty throttling is achieved by waking up
- * the kernel flusher here and later waiting on folios
- * which are in writeback to finish (see shrink_folio_list()).
- *
- * Flusher may not be able to issue writeback quickly
- * enough for cgroupv1 writeback throttling to work
- * on a large system.
- */
- if (!writeback_throttling_sane(sc))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
- }
-
return need_rotate;
}
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH v2 08/12] mm/mglru: simplify and improve dirty writeback handling
2026-03-28 19:52 ` [PATCH v2 08/12] mm/mglru: simplify and improve dirty writeback handling Kairui Song via B4 Relay
@ 2026-03-29 8:21 ` Kairui Song
2026-03-29 8:46 ` Kairui Song
0 siblings, 1 reply; 19+ messages in thread
From: Kairui Song @ 2026-03-29 8:21 UTC (permalink / raw)
To: linux-mm
Cc: kasong, Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang
Forwarding sashiko review:
https://sashiko.dev/#/message/20260329-mglru-reclaim-v2-8-b53a3678513c%40tencent.com
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 8de5c8d5849e..17b5318fad39 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> [ ... ]
> > @@ -4633,21 +4632,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
> > return true;
> > }
> >
> > - dirty = folio_test_dirty(folio);
> > - writeback = folio_test_writeback(folio);
> > - if (type == LRU_GEN_FILE && dirty) {
> > - sc->nr.file_taken += delta;
> > - if (!writeback)
> > - sc->nr.unqueued_dirty += delta;
> > - }
> > -
> > - /* waiting for writeback */
> > - if (writeback || (type == LRU_GEN_FILE && dirty)) {
> > - gen = folio_inc_gen(lruvec, folio, true);
> > - list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
> > - return true;
> > - }
> > -
> > return false;
> > }
>
> Could removing this proactive promotion cause a livelock and premature OOM
> during GFP_NOIO or GFP_NOFS direct reclaim?
> Without this check, dirty file folios fall through to isolate_folio(). If
> the reclaim does not have __GFP_IO, isolate_folio() will skip them.
> These skipped folios are spliced back to the front of the oldest generation
> list in scan_folios(). Because isolate_folio() isolates 0 folios,
> evict_folios() returns early.
> This completely bypasses the new wakeup_flusher_threads() call, leaving the
> dirty folios unqueued for writeback, and the oldest generation can never
> advance.
This is a nice find. For GFP_NOIO (or actually !__GFP_IO), we also need
to activate and set the reclaim flag for the dirty folios. It's a narrow case though.
Following update should work:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8170aee096e9..342ba3afe77c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4641,8 +4641,7 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
/* swap constrained */
if (!(sc->gfp_mask & __GFP_IO) &&
- (folio_test_dirty(folio) ||
- (folio_test_anon(folio) && !folio_test_swapcache(folio))))
+ (folio_test_anon(folio) && !folio_test_swapcache(folio)))
return false;
> [ ... ]
> > @@ -4858,12 +4840,27 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> > return scanned;
> > retry:
> > reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
> > - sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
> > sc->nr_reclaimed += reclaimed;
> > trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
> > type_scanned, reclaimed, &stat, sc->priority,
> > type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
> >
> > + /*
> > + * If too many file cache in the coldest generation can't be evicted
> > + * due to being dirty, wake up the flusher.
> > + */
> > + if (stat.nr_unqueued_dirty == isolated) {
>
> Is the isolated variable stale when evaluated on the retry path?
> If evict_folios() jumps back to the retry label, shrink_folio_list()
> processes a smaller list of only clean folios. The isolated variable retains
> the size of the original list, while stat.nr_unqueued_dirty can only be as
> large as the new, smaller list.
> Does this logically impossible condition cause any unintended behavior, or
> should the check be moved outside the retry loop to avoid confusion?
This is fine, stat.nr_unqueued_dirty is always smaller than isolated. The
"retry" label above is only used to handle some folios that failed to be
reclaimed after isolation. Meanwhile I do think we should clean up this
retry logic as it will also confuse the tracepoint. Better do it later.
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH v2 08/12] mm/mglru: simplify and improve dirty writeback handling
2026-03-29 8:21 ` Kairui Song
@ 2026-03-29 8:46 ` Kairui Song
0 siblings, 0 replies; 19+ messages in thread
From: Kairui Song @ 2026-03-29 8:46 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang
On Sun, Mar 29, 2026 at 4:21 PM Kairui Song <ryncsn@gmail.com> wrote:
>
> Forwarding sashiko review:
>
> https://sashiko.dev/#/message/20260329-mglru-reclaim-v2-8-b53a3678513c%40tencent.com
>
> > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > index 8de5c8d5849e..17b5318fad39 100644
> > > --- a/mm/vmscan.c
> > > +++ b/mm/vmscan.c
> > [ ... ]
> > > @@ -4633,21 +4632,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
> > > return true;
> > > }
> > >
> > > - dirty = folio_test_dirty(folio);
> > > - writeback = folio_test_writeback(folio);
> > > - if (type == LRU_GEN_FILE && dirty) {
> > > - sc->nr.file_taken += delta;
> > > - if (!writeback)
> > > - sc->nr.unqueued_dirty += delta;
> > > - }
> > > -
> > > - /* waiting for writeback */
> > > - if (writeback || (type == LRU_GEN_FILE && dirty)) {
> > > - gen = folio_inc_gen(lruvec, folio, true);
> > > - list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
> > > - return true;
> > > - }
> > > -
> > > return false;
> > > }
> >
> > Could removing this proactive promotion cause a livelock and premature OOM
> > during GFP_NOIO or GFP_NOFS direct reclaim?
> > Without this check, dirty file folios fall through to isolate_folio(). If
> > the reclaim does not have __GFP_IO, isolate_folio() will skip them.
> > These skipped folios are spliced back to the front of the oldest generation
> > list in scan_folios(). Because isolate_folio() isolates 0 folios,
> > evict_folios() returns early.
> > This completely bypasses the new wakeup_flusher_threads() call, leaving the
> > dirty folios unqueued for writeback, and the oldest generation can never
> > advance.
>
> This is a nice found. For GFP_NOIO (or actually !__GFP_IO), we also need
> to active and set reclaim for the dirty folios. It's a narrow case though.
>
> Following update should work:
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 8170aee096e9..342ba3afe77c 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4641,8 +4641,7 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
>
> /* swap constrained */
> if (!(sc->gfp_mask & __GFP_IO) &&
> - (folio_test_dirty(folio) ||
> - (folio_test_anon(folio) && !folio_test_swapcache(folio))))
> + (folio_test_anon(folio) && !folio_test_swapcache(folio)))
Or this check should just be removed. shrink_folio_list already has a
check for swap and a more accurate may_enter_fs check.
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v2 09/12] mm/mglru: remove no longer used reclaim argument for folio protection
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (7 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 08/12] mm/mglru: simplify and improve dirty writeback handling Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 10/12] mm/vmscan: remove sc->file_taken Kairui Song via B4 Relay
` (2 subsequent siblings)
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
Now dirty reclaim folios are handled after isolation, not before,
since dirty reactivation must take the folio off LRU first, and that
helps to unify the dirty handling logic.
So this argument is no longer needed. Just remove it.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17b5318fad39..07667649c5e2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3220,7 +3220,7 @@ static int folio_update_gen(struct folio *folio, int gen)
}
/* protect pages accessed multiple times through file descriptors */
-static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio)
{
int type = folio_is_file_lru(folio);
struct lru_gen_folio *lrugen = &lruvec->lrugen;
@@ -3239,9 +3239,6 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS);
new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
- /* for folio_end_writeback() */
- if (reclaiming)
- new_flags |= BIT(PG_reclaim);
} while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags));
lru_gen_update_size(lruvec, folio, old_gen, new_gen);
@@ -3855,7 +3852,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness)
VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
- new_gen = folio_inc_gen(lruvec, folio, false);
+ new_gen = folio_inc_gen(lruvec, folio);
list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
/* don't count the workingset being lazily promoted */
@@ -4612,7 +4609,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
/* protected */
if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) {
- gen = folio_inc_gen(lruvec, folio, false);
+ gen = folio_inc_gen(lruvec, folio);
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
/* don't count the workingset being lazily promoted */
@@ -4627,7 +4624,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
/* ineligible */
if (zone > sc->reclaim_idx) {
- gen = folio_inc_gen(lruvec, folio, false);
+ gen = folio_inc_gen(lruvec, folio);
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
return true;
}
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 10/12] mm/vmscan: remove sc->file_taken
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (8 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 09/12] mm/mglru: remove no longer used reclaim argument for folio protection Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 11/12] mm/vmscan: remove sc->unqueued_dirty Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 12/12] mm/vmscan: unify writeback reclaim statistic and throttling Kairui Song via B4 Relay
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
No one is using it now, just remove it.
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 07667649c5e2..603be5ef3ef2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -173,7 +173,6 @@ struct scan_control {
unsigned int congested;
unsigned int writeback;
unsigned int immediate;
- unsigned int file_taken;
unsigned int taken;
} nr;
@@ -2040,8 +2039,6 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
sc->nr.writeback += stat.nr_writeback;
sc->nr.immediate += stat.nr_immediate;
sc->nr.taken += nr_taken;
- if (file)
- sc->nr.file_taken += nr_taken;
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
nr_scanned, nr_reclaimed, &stat, sc->priority, file);
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 11/12] mm/vmscan: remove sc->unqueued_dirty
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (9 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 10/12] mm/vmscan: remove sc->file_taken Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
2026-03-28 19:52 ` [PATCH v2 12/12] mm/vmscan: unify writeback reclaim statistic and throttling Kairui Song via B4 Relay
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
No one is using it now; just remove it.
Suggested-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 603be5ef3ef2..1783da54ada1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,7 +169,6 @@ struct scan_control {
struct {
unsigned int dirty;
- unsigned int unqueued_dirty;
unsigned int congested;
unsigned int writeback;
unsigned int immediate;
@@ -2035,7 +2034,6 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
sc->nr.dirty += stat.nr_dirty;
sc->nr.congested += stat.nr_congested;
- sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
sc->nr.writeback += stat.nr_writeback;
sc->nr.immediate += stat.nr_immediate;
sc->nr.taken += nr_taken;
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread* [PATCH v2 12/12] mm/vmscan: unify writeback reclaim statistic and throttling
2026-03-28 19:52 [PATCH v2 00/12] mm/mglru: improve reclaim loop and dirty folio handling Kairui Song via B4 Relay
` (10 preceding siblings ...)
2026-03-28 19:52 ` [PATCH v2 11/12] mm/vmscan: remove sc->unqueued_dirty Kairui Song via B4 Relay
@ 2026-03-28 19:52 ` Kairui Song via B4 Relay
11 siblings, 0 replies; 19+ messages in thread
From: Kairui Song via B4 Relay @ 2026-03-28 19:52 UTC (permalink / raw)
To: linux-mm
Cc: Andrew Morton, Axel Rasmussen, Yuanchu Xie, Wei Xu,
Johannes Weiner, David Hildenbrand, Michal Hocko, Qi Zheng,
Shakeel Butt, Lorenzo Stoakes, Barry Song, David Stevens,
Chen Ridong, Leno Hou, Yafang Shao, Yu Zhao, Zicheng Wang,
Kalesh Singh, Suren Baghdasaryan, Chris Li, Vernon Yang,
linux-kernel, Qi Zheng, Baolin Wang, Kairui Song
From: Kairui Song <kasong@tencent.com>
Currently, MGLRU and non-MGLRU handle reclaim statistics and
writeback very differently, especially the throttling part.
Basically, MGLRU just ignores throttling.
Let's unify this part: use a helper to deduplicate the code so
both setups share the same behavior. Also remove the
folio_clear_reclaim call in isolate_folio, which was actively
interfering with the congestion control. PG_reclaim is now handled
by shrink_folio_list, so keeping the clearing in isolate_folio is
not helpful.
Tested using the following bash reproducer:
echo "Setup a slow device using dm delay"
dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
LOOP=$(losetup --show -f /var/tmp/backing)
mkfs.ext4 -q $LOOP
echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
dmsetup create slow_dev
mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
echo "Start writeback pressure"
sync && echo 3 > /proc/sys/vm/drop_caches
mkdir /sys/fs/cgroup/test_wb
echo 128M > /sys/fs/cgroup/test_wb/memory.max
(echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
echo "Clean up"
echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
dmsetup resume slow_dev
umount -l /mnt/slow && sync
dmsetup remove slow_dev
Before this commit, `dd` gets OOM killed immediately if MGLRU is
enabled; the classic LRU is fine.
After this commit, congestion control is effective, and there is no
more spinning on the LRU or premature OOM.
Stress tests on other workloads also look good.
Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
1 file changed, 41 insertions(+), 52 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1783da54ada1..83c8fdf8fdc4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
return !(current->flags & PF_LOCAL_THROTTLE);
}
+static void handle_reclaim_writeback(unsigned long nr_taken,
+ struct pglist_data *pgdat,
+ struct scan_control *sc,
+ struct reclaim_stat *stat)
+{
+ /*
+ * If dirty folios are scanned that are not queued for IO, it
+ * implies that flushers are not doing their job. This can
+ * happen when memory pressure pushes dirty folios to the end of
+ * the LRU before the dirty limits are breached and the dirty
+ * data has expired. It can also happen when the proportion of
+ * dirty folios grows not through writes but through memory
+ * pressure reclaiming all the clean cache. And in some cases,
+ * the flushers simply cannot keep up with the allocation
+ * rate. Nudge the flusher threads in case they are asleep.
+ */
+ if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
+ wakeup_flusher_threads(WB_REASON_VMSCAN);
+ /*
+ * For cgroupv1 dirty throttling is achieved by waking up
+ * the kernel flusher here and later waiting on folios
+ * which are in writeback to finish (see shrink_folio_list()).
+ *
+ * Flusher may not be able to issue writeback quickly
+ * enough for cgroupv1 writeback throttling to work
+ * on a large system.
+ */
+ if (!writeback_throttling_sane(sc))
+ reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+ }
+
+ sc->nr.dirty += stat->nr_dirty;
+ sc->nr.congested += stat->nr_congested;
+ sc->nr.writeback += stat->nr_writeback;
+ sc->nr.immediate += stat->nr_immediate;
+ sc->nr.taken += nr_taken;
+}
+
/*
* shrink_inactive_list() is a helper for shrink_node(). It returns the number
* of reclaimed pages
@@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
lruvec_lock_irq(lruvec);
lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
nr_scanned - nr_reclaimed);
-
- /*
- * If dirty folios are scanned that are not queued for IO, it
- * implies that flushers are not doing their job. This can
- * happen when memory pressure pushes dirty folios to the end of
- * the LRU before the dirty limits are breached and the dirty
- * data has expired. It can also happen when the proportion of
- * dirty folios grows not through writes but through memory
- * pressure reclaiming all the clean cache. And in some cases,
- * the flushers simply cannot keep up with the allocation
- * rate. Nudge the flusher threads in case they are asleep.
- */
- if (stat.nr_unqueued_dirty == nr_taken) {
- wakeup_flusher_threads(WB_REASON_VMSCAN);
- /*
- * For cgroupv1 dirty throttling is achieved by waking up
- * the kernel flusher here and later waiting on folios
- * which are in writeback to finish (see shrink_folio_list()).
- *
- * Flusher may not be able to issue writeback quickly
- * enough for cgroupv1 writeback throttling to work
- * on a large system.
- */
- if (!writeback_throttling_sane(sc))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
- }
-
- sc->nr.dirty += stat.nr_dirty;
- sc->nr.congested += stat.nr_congested;
- sc->nr.writeback += stat.nr_writeback;
- sc->nr.immediate += stat.nr_immediate;
- sc->nr.taken += nr_taken;
-
+ handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
nr_scanned, nr_reclaimed, &stat, sc->priority, file);
return nr_reclaimed;
@@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
if (!folio_test_referenced(folio))
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
- /* for shrink_folio_list() */
- folio_clear_reclaim(folio);
-
success = lru_gen_del_folio(lruvec, folio, true);
VM_WARN_ON_ONCE_FOLIO(!success, folio);
@@ -4833,26 +4836,11 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
retry:
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
sc->nr_reclaimed += reclaimed;
+ handle_reclaim_writeback(isolated, pgdat, sc, &stat);
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
type_scanned, reclaimed, &stat, sc->priority,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
- /*
- * If too many file cache in the coldest generation can't be evicted
- * due to being dirty, wake up the flusher.
- */
- if (stat.nr_unqueued_dirty == isolated) {
- wakeup_flusher_threads(WB_REASON_VMSCAN);
-
- /*
- * For cgroupv1 dirty throttling is achieved by waking up
- * the kernel flusher here and later waiting on folios
- * which are in writeback to finish (see shrink_folio_list()).
- */
- if (!writeback_throttling_sane(sc))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
- }
-
list_for_each_entry_safe_reverse(folio, next, &list, lru) {
DEFINE_MIN_SEQ(lruvec);
@@ -4895,6 +4883,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
if (!list_empty(&list)) {
skip_retry = true;
+ isolated = 0;
goto retry;
}
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread