From: Andrew Morton <akpm@linux-foundation.org>
To: mm-commits@vger.kernel.org,hannes@cmpxchg.org,akpm@linux-foundation.org
Subject: [to-be-updated] mm-switch-deferred-split-shrinker-to-list_lru.patch removed from -mm tree
Date: Mon, 30 Mar 2026 12:03:54 -0700
Message-ID: <20260330190355.196C7C4CEF7@smtp.kernel.org>
The quilt patch titled
     Subject: mm: switch deferred split shrinker to list_lru
has been removed from the -mm tree.  Its filename was
     mm-switch-deferred-split-shrinker-to-list_lru.patch

This patch was dropped because an updated version will be issued
------------------------------------------------------
From: Johannes Weiner <hannes@cmpxchg.org>
Subject: mm: switch deferred split shrinker to list_lru
Date: Wed, 18 Mar 2026 15:53:25 -0400
The deferred split queue handles cgroups in a suboptimal fashion. The
queue is per-NUMA node or per-cgroup, not the intersection. That means on
a cgrouped system, a node-restricted allocation entering reclaim can end
up splitting large pages on other nodes:
  alloc/unmap
    deferred_split_folio()
      list_add_tail(memcg->split_queue)
      set_shrinker_bit(memcg, node, deferred_shrinker_id)

  for_each_zone_zonelist_nodemask(restricted_nodes)
    mem_cgroup_iter()
      shrink_slab(node, memcg)
        shrink_slab_memcg(node, memcg)
          if test_shrinker_bit(memcg, node, deferred_shrinker_id)
            deferred_split_scan()
              walks memcg->split_queue
The shrinker bit adds an imperfect guard rail. As soon as the cgroup has
a single large page on the node of interest, all large pages owned by that
memcg, including those on other nodes, will be split.
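For reference, the pre-patch counting callback (condensed from the hunk removed further down in mm/huge_memory.c, with comments added) shows that node-blindness directly: as soon as sc->memcg is set, sc->nid no longer narrows what is counted, and the subsequent scan walks the memcg-wide queue.

static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	/* Default: the split queue of the node being reclaimed. */
	struct deferred_split *ds_queue =
		&NODE_DATA(sc->nid)->deferred_split_queue;

#ifdef CONFIG_MEMCG
	/* Any cgrouped reclaim switches to the node-agnostic memcg queue. */
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}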
list_lru properly sets up per-node, per-cgroup lists. As a bonus, it
streamlines a lot of the list operations and reclaim walks. It's used
widely by other major shrinkers already. Convert the deferred split queue
as well.
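To illustrate the pattern being converted to, here is a minimal, self-contained sketch of a memcg-aware list_lru-backed shrinker. It uses the generic list_lru and shrinker APIs as they appear in this tree (list_lru_init_memcg(), list_lru_shrink_count(), list_lru_shrink_walk()); the example_* names are hypothetical, and the patch below additionally relies on list_lru_init_memcg_key(), list_lru_lock() and __list_lru_add()/__list_lru_del() helpers that appear to be introduced elsewhere in this series.

#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/shrinker.h>

/* One lru; list_lru internally keeps one sublist per (node, memcg) pair. */
static struct list_lru example_lru;
static struct shrinker *example_shrinker;

/*
 * Producers hand each object to the sublist matching its node and memcg,
 * e.g.: list_lru_add(&example_lru, &obj->list, nid, memcg);
 */

/* Count only the (sc->nid, sc->memcg) sublist named by reclaim. */
static unsigned long example_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return list_lru_shrink_count(&example_lru, sc);
}

/* Walk callback: detach each entry onto the caller's private list. */
static enum lru_status example_isolate(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	list_lru_isolate_move(lru, item, dispose);
	return LRU_REMOVED;
}

static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long isolated;

	/* The walk, too, is confined to the sc->nid + sc->memcg sublist. */
	isolated = list_lru_shrink_walk(&example_lru, sc,
					example_isolate, &dispose);

	/* ... reclaim the entries now sitting on @dispose ... */

	return isolated ? isolated : SHRINK_STOP;
}

static int __init example_shrinker_init(void)
{
	example_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
					  SHRINKER_MEMCG_AWARE, "example");
	if (!example_shrinker)
		return -ENOMEM;

	/* Tie the lru to the shrinker so per-memcg shrinker bits are kept. */
	if (list_lru_init_memcg(&example_lru, example_shrinker)) {
		shrinker_free(example_shrinker);
		return -ENOMEM;
	}

	example_shrinker->count_objects = example_count;
	example_shrinker->scan_objects = example_scan;
	shrinker_register(example_shrinker);
	return 0;
}

The point of the conversion is visible in example_count() and example_scan(): both are automatically confined to the (sc->nid, sc->memcg) sublist, which is exactly the scoping the deferred split queue lacked.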
The list_lru per-memcg heads are instantiated on demand when the first
object of interest is allocated for a cgroup, by calling
folio_memcg_list_lru_alloc(). Add these calls at the sites where splittable
pages are created: anon faults, swapin faults, and khugepaged collapse.
These calls create all possible node heads for the cgroup at once, so the
migration code (between nodes) doesn't need any special care.
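The call sites all follow the same shape. Condensed from the mm/khugepaged.c hunk below (surrounding error handling elided), the ordering matters: charge the folio first so it has a memcg, then allocate that memcg's per-node list heads before the folio can ever reach deferred_split_folio():

	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	/* Instantiate the memcg's deferred_split_lru heads on all nodes. */
	if (folio_memcg_list_lru_alloc(folio, &deferred_split_lru, gfp)) {
		/* Treated like a charge failure: drop the folio, fall back. */
		folio_put(folio);
		return SCAN_CGROUP_CHARGE_FAIL;
	}

The anon-fault and swapin paths in mm/memory.c do the same thing right after mem_cgroup_charge() / mem_cgroup_swapin_charge_folio(), as shown in their hunks below.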
Link: https://lkml.kernel.org/r/20260318200352.1039011-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: David Hildenbrand (Arm) <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <ljs@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nico Pache <npache@redhat.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/huge_mm.h | 6
include/linux/memcontrol.h | 4
include/linux/mmzone.h | 12 -
mm/huge_memory.c | 342 ++++++++++-------------------------
mm/internal.h | 2
mm/khugepaged.c | 7
mm/memcontrol.c | 12 -
mm/memory.c | 52 +++--
mm/mm_init.c | 15 -
9 files changed, 151 insertions(+), 301 deletions(-)
--- a/include/linux/huge_mm.h~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/include/linux/huge_mm.h
@@ -414,10 +414,9 @@ static inline int split_huge_page(struct
{
return split_huge_page_to_list_to_order(page, NULL, 0);
}
+
+extern struct list_lru deferred_split_lru;
void deferred_split_folio(struct folio *folio, bool partially_mapped);
-#ifdef CONFIG_MEMCG
-void reparent_deferred_split_queue(struct mem_cgroup *memcg);
-#endif
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze);
@@ -650,7 +649,6 @@ static inline int try_folio_split_to_ord
}
static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
-static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
--- a/include/linux/memcontrol.h~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/include/linux/memcontrol.h
@@ -277,10 +277,6 @@ struct mem_cgroup {
struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct deferred_split deferred_split_queue;
-#endif
-
#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
--- a/include/linux/mmzone.h~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/include/linux/mmzone.h
@@ -1426,14 +1426,6 @@ struct zonelist {
*/
extern struct page *mem_map;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-struct deferred_split {
- spinlock_t split_queue_lock;
- struct list_head split_queue;
- unsigned long split_queue_len;
-};
-#endif
-
#ifdef CONFIG_MEMORY_FAILURE
/*
* Per NUMA node memory failure handling statistics.
@@ -1559,10 +1551,6 @@ typedef struct pglist_data {
unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct deferred_split deferred_split_queue;
-#endif
-
#ifdef CONFIG_NUMA_BALANCING
/* start time in ms of current promote rate limit period */
unsigned int nbp_rl_start;
--- a/mm/huge_memory.c~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/mm/huge_memory.c
@@ -14,6 +14,7 @@
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
+#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
@@ -67,6 +68,8 @@ unsigned long transparent_hugepage_flags
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
+static struct lock_class_key deferred_split_key;
+struct list_lru deferred_split_lru;
static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc);
@@ -927,6 +930,13 @@ static int __init thp_shrinker_init(void
if (!deferred_split_shrinker)
return -ENOMEM;
+ if (list_lru_init_memcg_key(&deferred_split_lru,
+ deferred_split_shrinker,
+ &deferred_split_key)) {
+ shrinker_free(deferred_split_shrinker);
+ return -ENOMEM;
+ }
+
deferred_split_shrinker->count_objects = deferred_split_count;
deferred_split_shrinker->scan_objects = deferred_split_scan;
shrinker_register(deferred_split_shrinker);
@@ -947,6 +957,7 @@ static int __init thp_shrinker_init(void
huge_zero_folio_shrinker = shrinker_alloc(0, "thp-zero");
if (!huge_zero_folio_shrinker) {
+ list_lru_destroy(&deferred_split_lru);
shrinker_free(deferred_split_shrinker);
return -ENOMEM;
}
@@ -961,6 +972,7 @@ static int __init thp_shrinker_init(void
static void __init thp_shrinker_exit(void)
{
shrinker_free(huge_zero_folio_shrinker);
+ list_lru_destroy(&deferred_split_lru);
shrinker_free(deferred_split_shrinker);
}
@@ -1141,119 +1153,6 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struc
return pmd;
}
-static struct deferred_split *split_queue_node(int nid)
-{
- struct pglist_data *pgdata = NODE_DATA(nid);
-
- return &pgdata->deferred_split_queue;
-}
-
-#ifdef CONFIG_MEMCG
-static inline
-struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
- struct deferred_split *queue)
-{
- if (mem_cgroup_disabled())
- return NULL;
- if (split_queue_node(folio_nid(folio)) == queue)
- return NULL;
- return container_of(queue, struct mem_cgroup, deferred_split_queue);
-}
-
-static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
-{
- return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
-}
-#else
-static inline
-struct mem_cgroup *folio_split_queue_memcg(struct folio *folio,
- struct deferred_split *queue)
-{
- return NULL;
-}
-
-static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
-{
- return split_queue_node(nid);
-}
-#endif
-
-static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
-{
- struct deferred_split *queue;
-
-retry:
- queue = memcg_split_queue(nid, memcg);
- spin_lock(&queue->split_queue_lock);
- /*
- * There is a period between setting memcg to dying and reparenting
- * deferred split queue, and during this period the THPs in the deferred
- * split queue will be hidden from the shrinker side.
- */
- if (unlikely(memcg_is_dying(memcg))) {
- spin_unlock(&queue->split_queue_lock);
- memcg = parent_mem_cgroup(memcg);
- goto retry;
- }
-
- return queue;
-}
-
-static struct deferred_split *
-split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
-{
- struct deferred_split *queue;
-
-retry:
- queue = memcg_split_queue(nid, memcg);
- spin_lock_irqsave(&queue->split_queue_lock, *flags);
- if (unlikely(memcg_is_dying(memcg))) {
- spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
- memcg = parent_mem_cgroup(memcg);
- goto retry;
- }
-
- return queue;
-}
-
-static struct deferred_split *folio_split_queue_lock(struct folio *folio)
-{
- struct deferred_split *queue;
-
- rcu_read_lock();
- queue = split_queue_lock(folio_nid(folio), folio_memcg(folio));
- /*
- * The memcg destruction path is acquiring the split queue lock for
- * reparenting. Once you have it locked, it's safe to drop the rcu lock.
- */
- rcu_read_unlock();
-
- return queue;
-}
-
-static struct deferred_split *
-folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
-{
- struct deferred_split *queue;
-
- rcu_read_lock();
- queue = split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
- rcu_read_unlock();
-
- return queue;
-}
-
-static inline void split_queue_unlock(struct deferred_split *queue)
-{
- spin_unlock(&queue->split_queue_lock);
-}
-
-static inline void split_queue_unlock_irqrestore(struct deferred_split *queue,
- unsigned long flags)
-{
- spin_unlock_irqrestore(&queue->split_queue_lock, flags);
-}
-
static inline bool is_transparent_hugepage(const struct folio *folio)
{
if (!folio_test_large(folio))
@@ -1354,6 +1253,14 @@ static struct folio *vma_alloc_anon_foli
count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
return NULL;
}
+
+ if (folio_memcg_list_lru_alloc(folio, &deferred_split_lru, gfp)) {
+ folio_put(folio);
+ count_vm_event(THP_FAULT_FALLBACK);
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
+ return NULL;
+ }
+
folio_throttle_swaprate(folio, gfp);
/*
@@ -3887,34 +3794,34 @@ static int __folio_freeze_and_split_unma
struct folio *end_folio = folio_next(folio);
struct folio *new_folio, *next;
int old_order = folio_order(folio);
+ struct list_lru_one *l;
+ bool dequeue_deferred;
int ret = 0;
- struct deferred_split *ds_queue;
VM_WARN_ON_ONCE(!mapping && end);
/* Prevent deferred_split_scan() touching ->_refcount */
- ds_queue = folio_split_queue_lock(folio);
+ dequeue_deferred = folio_test_anon(folio) && old_order > 1;
+ if (dequeue_deferred) {
+ rcu_read_lock();
+ l = list_lru_lock(&deferred_split_lru,
+ folio_nid(folio), folio_memcg(folio));
+ }
if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
struct swap_cluster_info *ci = NULL;
struct lruvec *lruvec;
- if (old_order > 1) {
- if (!list_empty(&folio->_deferred_list)) {
- ds_queue->split_queue_len--;
- /*
- * Reinitialize page_deferred_list after removing the
- * page from the split_queue, otherwise a subsequent
- * split will see list corruption when checking the
- * page_deferred_list.
- */
- list_del_init(&folio->_deferred_list);
- }
+ if (dequeue_deferred) {
+ __list_lru_del(&deferred_split_lru, l,
+ &folio->_deferred_list, folio_nid(folio));
if (folio_test_partially_mapped(folio)) {
folio_clear_partially_mapped(folio);
mod_mthp_stat(old_order,
MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
}
+ list_lru_unlock(l);
+ rcu_read_unlock();
}
- split_queue_unlock(ds_queue);
+
if (mapping) {
int nr = folio_nr_pages(folio);
@@ -4015,7 +3922,10 @@ static int __folio_freeze_and_split_unma
if (ci)
swap_cluster_unlock(ci);
} else {
- split_queue_unlock(ds_queue);
+ if (dequeue_deferred) {
+ list_lru_unlock(l);
+ rcu_read_unlock();
+ }
return -EAGAIN;
}
@@ -4382,33 +4292,35 @@ int split_folio_to_list(struct folio *fo
* queueing THP splits, and that list is (racily observed to be) non-empty.
*
* It is unsafe to call folio_unqueue_deferred_split() until folio refcount is
- * zero: because even when split_queue_lock is held, a non-empty _deferred_list
- * might be in use on deferred_split_scan()'s unlocked on-stack list.
+ * zero: because even when the list_lru lock is held, a non-empty
+ * _deferred_list might be in use on deferred_split_scan()'s unlocked
+ * on-stack list.
*
- * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is
- * therefore important to unqueue deferred split before changing folio memcg.
+ * The list_lru sublist is determined by folio's memcg: it is therefore
+ * important to unqueue deferred split before changing folio memcg.
*/
bool __folio_unqueue_deferred_split(struct folio *folio)
{
- struct deferred_split *ds_queue;
+ struct list_lru_one *l;
+ int nid = folio_nid(folio);
unsigned long flags;
bool unqueued = false;
WARN_ON_ONCE(folio_ref_count(folio));
WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
- ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
- if (!list_empty(&folio->_deferred_list)) {
- ds_queue->split_queue_len--;
+ rcu_read_lock();
+ l = list_lru_lock_irqsave(&deferred_split_lru, nid, folio_memcg(folio), &flags);
+ if (__list_lru_del(&deferred_split_lru, l, &folio->_deferred_list, nid)) {
if (folio_test_partially_mapped(folio)) {
folio_clear_partially_mapped(folio);
mod_mthp_stat(folio_order(folio),
MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
}
- list_del_init(&folio->_deferred_list);
unqueued = true;
}
- split_queue_unlock_irqrestore(ds_queue, flags);
+ list_lru_unlock_irqrestore(l, &flags);
+ rcu_read_unlock();
return unqueued; /* useful for debug warnings */
}
@@ -4416,7 +4328,9 @@ bool __folio_unqueue_deferred_split(stru
/* partially_mapped=false won't clear PG_partially_mapped folio flag */
void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
- struct deferred_split *ds_queue;
+ struct list_lru_one *l;
+ int nid;
+ struct mem_cgroup *memcg;
unsigned long flags;
/*
@@ -4439,7 +4353,11 @@ void deferred_split_folio(struct folio *
if (folio_test_swapcache(folio))
return;
- ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
+ nid = folio_nid(folio);
+
+ rcu_read_lock();
+ memcg = folio_memcg(folio);
+ l = list_lru_lock_irqsave(&deferred_split_lru, nid, memcg, &flags);
if (partially_mapped) {
if (!folio_test_partially_mapped(folio)) {
folio_set_partially_mapped(folio);
@@ -4447,36 +4365,20 @@ void deferred_split_folio(struct folio *
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
-
}
} else {
/* partially mapped folios cannot become non-partially mapped */
VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
}
- if (list_empty(&folio->_deferred_list)) {
- struct mem_cgroup *memcg;
-
- memcg = folio_split_queue_memcg(folio, ds_queue);
- list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
- ds_queue->split_queue_len++;
- if (memcg)
- set_shrinker_bit(memcg, folio_nid(folio),
- shrinker_id(deferred_split_shrinker));
- }
- split_queue_unlock_irqrestore(ds_queue, flags);
+ __list_lru_add(&deferred_split_lru, l, &folio->_deferred_list, nid, memcg);
+ list_lru_unlock_irqrestore(l, &flags);
+ rcu_read_unlock();
}
static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct pglist_data *pgdata = NODE_DATA(sc->nid);
- struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
-
-#ifdef CONFIG_MEMCG
- if (sc->memcg)
- ds_queue = &sc->memcg->deferred_split_queue;
-#endif
- return READ_ONCE(ds_queue->split_queue_len);
+ return list_lru_shrink_count(&deferred_split_lru, sc);
}
static bool thp_underused(struct folio *folio)
@@ -4506,45 +4408,47 @@ static bool thp_underused(struct folio *
return false;
}
+static enum lru_status deferred_split_isolate(struct list_head *item,
+ struct list_lru_one *lru,
+ void *cb_arg)
+{
+ struct folio *folio = container_of(item, struct folio, _deferred_list);
+ struct list_head *freeable = cb_arg;
+
+ if (folio_try_get(folio)) {
+ list_lru_isolate_move(lru, item, freeable);
+ return LRU_REMOVED;
+ }
+
+ /* We lost race with folio_put() */
+ list_lru_isolate(lru, item);
+ if (folio_test_partially_mapped(folio)) {
+ folio_clear_partially_mapped(folio);
+ mod_mthp_stat(folio_order(folio),
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+ }
+ return LRU_REMOVED;
+}
+
static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct deferred_split *ds_queue;
- unsigned long flags;
+ LIST_HEAD(dispose);
struct folio *folio, *next;
- int split = 0, i;
- struct folio_batch fbatch;
-
- folio_batch_init(&fbatch);
+ int split = 0;
+ unsigned long isolated;
-retry:
- ds_queue = split_queue_lock_irqsave(sc->nid, sc->memcg, &flags);
- /* Take pin on all head pages to avoid freeing them under us */
- list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
- _deferred_list) {
- if (folio_try_get(folio)) {
- folio_batch_add(&fbatch, folio);
- } else if (folio_test_partially_mapped(folio)) {
- /* We lost race with folio_put() */
- folio_clear_partially_mapped(folio);
- mod_mthp_stat(folio_order(folio),
- MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
- }
- list_del_init(&folio->_deferred_list);
- ds_queue->split_queue_len--;
- if (!--sc->nr_to_scan)
- break;
- if (!folio_batch_space(&fbatch))
- break;
- }
- split_queue_unlock_irqrestore(ds_queue, flags);
+ isolated = list_lru_shrink_walk_irq(&deferred_split_lru, sc,
+ deferred_split_isolate, &dispose);
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ list_for_each_entry_safe(folio, next, &dispose, _deferred_list) {
bool did_split = false;
bool underused = false;
- struct deferred_split *fqueue;
+ struct list_lru_one *l;
+ unsigned long flags;
+
+ list_del_init(&folio->_deferred_list);
- folio = fbatch.folios[i];
if (!folio_test_partially_mapped(folio)) {
/*
* See try_to_map_unused_to_zeropage(): we cannot
@@ -4567,64 +4471,32 @@ retry:
}
folio_unlock(folio);
next:
- if (did_split || !folio_test_partially_mapped(folio))
- continue;
/*
* Only add back to the queue if folio is partially mapped.
* If thp_underused returns false, or if split_folio fails
* in the case it was underused, then consider it used and
* don't add it back to split_queue.
*/
- fqueue = folio_split_queue_lock_irqsave(folio, &flags);
- if (list_empty(&folio->_deferred_list)) {
- list_add_tail(&folio->_deferred_list, &fqueue->split_queue);
- fqueue->split_queue_len++;
+ if (!did_split && folio_test_partially_mapped(folio)) {
+ rcu_read_lock();
+ l = list_lru_lock_irqsave(&deferred_split_lru,
+ folio_nid(folio),
+ folio_memcg(folio),
+ &flags);
+ __list_lru_add(&deferred_split_lru, l,
+ &folio->_deferred_list,
+ folio_nid(folio), folio_memcg(folio));
+ list_lru_unlock_irqrestore(l, &flags);
+ rcu_read_unlock();
}
- split_queue_unlock_irqrestore(fqueue, flags);
- }
- folios_put(&fbatch);
-
- if (sc->nr_to_scan && !list_empty(&ds_queue->split_queue)) {
- cond_resched();
- goto retry;
+ folio_put(folio);
}
- /*
- * Stop shrinker if we didn't split any page, but the queue is empty.
- * This can happen if pages were freed under us.
- */
- if (!split && list_empty(&ds_queue->split_queue))
+ if (!split && !isolated)
return SHRINK_STOP;
return split;
}
-#ifdef CONFIG_MEMCG
-void reparent_deferred_split_queue(struct mem_cgroup *memcg)
-{
- struct mem_cgroup *parent = parent_mem_cgroup(memcg);
- struct deferred_split *ds_queue = &memcg->deferred_split_queue;
- struct deferred_split *parent_ds_queue = &parent->deferred_split_queue;
- int nid;
-
- spin_lock_irq(&ds_queue->split_queue_lock);
- spin_lock_nested(&parent_ds_queue->split_queue_lock, SINGLE_DEPTH_NESTING);
-
- if (!ds_queue->split_queue_len)
- goto unlock;
-
- list_splice_tail_init(&ds_queue->split_queue, &parent_ds_queue->split_queue);
- parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
- ds_queue->split_queue_len = 0;
-
- for_each_node(nid)
- set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
-
-unlock:
- spin_unlock(&parent_ds_queue->split_queue_lock);
- spin_unlock_irq(&ds_queue->split_queue_lock);
-}
-#endif
-
#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
--- a/mm/internal.h~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/mm/internal.h
@@ -862,7 +862,7 @@ static inline bool folio_unqueue_deferre
/*
* At this point, there is no one trying to add the folio to
* deferred_list. If folio is not in deferred_list, it's safe
- * to check without acquiring the split_queue_lock.
+ * to check without acquiring the list_lru lock.
*/
if (data_race(list_empty(&folio->_deferred_list)))
return false;
--- a/mm/khugepaged.c~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/mm/khugepaged.c
@@ -1081,6 +1081,7 @@ static enum scan_result alloc_charge_fol
}
count_vm_event(THP_COLLAPSE_ALLOC);
+
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
folio_put(folio);
*foliop = NULL;
@@ -1089,6 +1090,12 @@ static enum scan_result alloc_charge_fol
count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
+ if (folio_memcg_list_lru_alloc(folio, &deferred_split_lru, gfp)) {
+ folio_put(folio);
+ *foliop = NULL;
+ return SCAN_CGROUP_CHARGE_FAIL;
+ }
+
*foliop = folio;
return SCAN_SUCCEED;
}
--- a/mm/memcontrol.c~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/mm/memcontrol.c
@@ -4025,11 +4025,6 @@ static struct mem_cgroup *mem_cgroup_all
memcg->cgwb_frn[i].done =
__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
- INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
- memcg->deferred_split_queue.split_queue_len = 0;
-#endif
lru_gen_init_memcg(memcg);
return memcg;
fail:
@@ -4180,11 +4175,10 @@ static void mem_cgroup_css_offline(struc
zswap_memcg_offline_cleanup(memcg);
memcg_offline_kmem(memcg);
- reparent_deferred_split_queue(memcg);
/*
- * The reparenting of objcg must be after the reparenting of the
- * list_lru and deferred_split_queue above, which ensures that they will
- * not mistakenly get the parent list_lru and deferred_split_queue.
+ * The reparenting of objcg must be after the reparenting of
+ * the list_lru in memcg_offline_kmem(), which ensures that
+ * they will not mistakenly get the parent list_lru.
*/
memcg_reparent_objcgs(memcg);
reparent_shrinker_deferred(memcg);
--- a/mm/memory.c~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/mm/memory.c
@@ -4736,13 +4736,19 @@ static struct folio *alloc_swap_folio(st
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
folio = vma_alloc_folio(gfp, order, vma, addr);
- if (folio) {
- if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
- gfp, entry))
- return folio;
+ if (!folio)
+ goto next;
+ if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, gfp, entry)) {
count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE);
folio_put(folio);
+ goto next;
}
+ if (folio_memcg_list_lru_alloc(folio, &deferred_split_lru, gfp)) {
+ folio_put(folio);
+ goto fallback;
+ }
+ return folio;
+next:
count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK);
order = next_order(&orders, order);
}
@@ -5254,24 +5260,28 @@ static struct folio *alloc_anon_folio(st
while (orders) {
addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
folio = vma_alloc_folio(gfp, order, vma, addr);
- if (folio) {
- if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
- count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
- folio_put(folio);
- goto next;
- }
- folio_throttle_swaprate(folio, gfp);
- /*
- * When a folio is not zeroed during allocation
- * (__GFP_ZERO not used) or user folios require special
- * handling, folio_zero_user() is used to make sure
- * that the page corresponding to the faulting address
- * will be hot in the cache after zeroing.
- */
- if (user_alloc_needs_zeroing())
- folio_zero_user(folio, vmf->address);
- return folio;
+ if (!folio)
+ goto next;
+ if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
+ count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+ folio_put(folio);
+ goto next;
+ }
+ if (folio_memcg_list_lru_alloc(folio, &deferred_split_lru, gfp)) {
+ folio_put(folio);
+ goto fallback;
}
+ folio_throttle_swaprate(folio, gfp);
+ /*
+ * When a folio is not zeroed during allocation
+ * (__GFP_ZERO not used) or user folios require special
+ * handling, folio_zero_user() is used to make sure
+ * that the page corresponding to the faulting address
+ * will be hot in the cache after zeroing.
+ */
+ if (user_alloc_needs_zeroing())
+ folio_zero_user(folio, vmf->address);
+ return folio;
next:
count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
order = next_order(&orders, order);
--- a/mm/mm_init.c~mm-switch-deferred-split-shrinker-to-list_lru
+++ a/mm/mm_init.c
@@ -1387,19 +1387,6 @@ static void __init calculate_node_totalp
pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static void pgdat_init_split_queue(struct pglist_data *pgdat)
-{
- struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
-
- spin_lock_init(&ds_queue->split_queue_lock);
- INIT_LIST_HEAD(&ds_queue->split_queue);
- ds_queue->split_queue_len = 0;
-}
-#else
-static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
-#endif
-
#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
@@ -1415,8 +1402,6 @@ static void __meminit pgdat_init_interna
pgdat_resize_init(pgdat);
pgdat_kswapd_lock_init(pgdat);
-
- pgdat_init_split_queue(pgdat);
pgdat_init_kcompactd(pgdat);
init_waitqueue_head(&pgdat->kswapd_wait);
_
Patches currently in -mm which might be from hannes@cmpxchg.org are