From: Johannes Weiner <hannes@cmpxchg.org>
To: "David Hildenbrand (Arm)" <david@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Shakeel Butt <shakeel.butt@linux.dev>,
	Yosry Ahmed <yosry.ahmed@linux.dev>, Zi Yan <ziy@nvidia.com>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	Usama Arif <usama.arif@linux.dev>,
	Kiryl Shutsemau <kas@kernel.org>,
	Dave Chinner <david@fromorbit.com>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 7/7] mm: switch deferred split shrinker to list_lru
Date: Fri, 20 Mar 2026 12:07:09 -0400
Message-ID: <ab1wrRdT-4GfWbcH@cmpxchg.org>
In-Reply-To: <e9631520-857f-4f67-8944-6af7a2b47e89@kernel.org>

On Thu, Mar 19, 2026 at 08:21:21AM +0100, David Hildenbrand (Arm) wrote:
> Of course :) If list_lru lock helpers would be the right thing to do, it
> might be better placed in this series.

I think this is slightly more promising. See below. The callsites in
huge_memory.c look nicer. But the double folio_nid() and folio_memcg()
lookups (when the caller needs them too) are kind of unfortunate; and
it feels like a lot of API for 4 callsites. Thoughts?
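
For example, the requeue path in deferred_split_scan() below ends up
resolving nid and memcg twice, once inside the helper and once again
for the add itself. Roughly what runs there (sketch, not part of the
patch):

	/* inside folio_list_lru_lock_irqsave(): */
	rcu_read_lock();
	l = list_lru_lock_irqsave(lru, folio_nid(folio),
				  folio_memcg(folio), flags);

	/* back in deferred_split_scan(), the same lookups repeat: */
	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list,
		       folio_nid(folio), folio_memcg(folio));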

 include/linux/list_lru.h |  8 ++++++++
 mm/huge_memory.c         | 43 ++++++++++++++-----------------------------
 mm/list_lru.c            | 29 +++++++++++++++++++++++++++++
 3 files changed, 51 insertions(+), 29 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 4bd29b61c59a..6b734d08fa1b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -123,6 +123,14 @@ struct list_lru_one *list_lru_lock_irqsave(struct list_lru *lru, int nid,
 void list_lru_unlock_irqrestore(struct list_lru_one *l,
 		unsigned long *irq_flags);
 
+struct list_lru_one *folio_list_lru_lock(struct folio *folio,
+		struct list_lru *lru);
+void folio_list_lru_unlock(struct folio *folio, struct list_lru_one *l);
+struct list_lru_one *folio_list_lru_lock_irqsave(struct folio *folio,
+		struct list_lru *lru, unsigned long *flags);
+void folio_list_lru_unlock_irqrestore(struct folio *folio,
+		struct list_lru_one *l, unsigned long *flags);
+
 /* Caller-locked variants, see list_lru_add() etc for documentation */
 bool __list_lru_add(struct list_lru *lru, struct list_lru_one *l,
 		struct list_head *item, int nid, struct mem_cgroup *memcg);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e90d08db219d..6996ef224e24 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3768,11 +3768,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
 	VM_WARN_ON_ONCE(!mapping && end);
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	dequeue_deferred = folio_test_anon(folio) && old_order > 1;
-	if (dequeue_deferred) {
-		rcu_read_lock();
-		l = list_lru_lock(&deferred_split_lru,
-				  folio_nid(folio), folio_memcg(folio));
-	}
+	if (dequeue_deferred)
+		l = folio_list_lru_lock(folio, &deferred_split_lru);
 	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
 		struct swap_cluster_info *ci = NULL;
 		struct lruvec *lruvec;
@@ -3785,8 +3782,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
 				mod_mthp_stat(old_order,
 					MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 			}
-			list_lru_unlock(l);
-			rcu_read_unlock();
+			folio_list_lru_unlock(folio, l);
 		}
 
 		if (mapping) {
@@ -3889,10 +3885,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
 		if (ci)
 			swap_cluster_unlock(ci);
 	} else {
-		if (dequeue_deferred) {
-			list_lru_unlock(l);
-			rcu_read_unlock();
-		}
+		if (dequeue_deferred)
+			folio_list_lru_unlock(folio, l);
 		return -EAGAIN;
 	}
 
@@ -4276,8 +4270,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 	WARN_ON_ONCE(folio_ref_count(folio));
 	WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg_charged(folio));
 
-	rcu_read_lock();
-	l = list_lru_lock_irqsave(&deferred_split_lru, nid, folio_memcg(folio), &flags);
+	l = folio_list_lru_lock_irqsave(folio, &deferred_split_lru, &flags);
 	if (__list_lru_del(&deferred_split_lru, l, &folio->_deferred_list, nid)) {
 		if (folio_test_partially_mapped(folio)) {
 			folio_clear_partially_mapped(folio);
@@ -4286,7 +4279,6 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 		}
 		unqueued = true;
 	}
-	list_lru_unlock_irqrestore(l, &flags);
-	rcu_read_unlock();
+	folio_list_lru_unlock_irqrestore(folio, l, &flags);
 
 	return unqueued;	/* useful for debug warnings */
@@ -4297,7 +4289,6 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
 	struct list_lru_one *l;
 	int nid;
-	struct mem_cgroup *memcg;
 	unsigned long flags;
 
 	/*
@@ -4322,9 +4313,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 
 	nid = folio_nid(folio);
 
-	rcu_read_lock();
-	memcg = folio_memcg(folio);
-	l = list_lru_lock_irqsave(&deferred_split_lru, nid, memcg, &flags);
+	l = folio_list_lru_lock_irqsave(folio, &deferred_split_lru, &flags);
 	if (partially_mapped) {
 		if (!folio_test_partially_mapped(folio)) {
 			folio_set_partially_mapped(folio);
@@ -4337,9 +4326,9 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 		/* partially mapped folios cannot become non-partially mapped */
 		VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
 	}
-	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list, nid, memcg);
-	list_lru_unlock_irqrestore(l, &flags);
-	rcu_read_unlock();
+	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list, nid,
+		       folio_memcg(folio));
+	folio_list_lru_unlock_irqrestore(folio, l, &flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
@@ -4445,16 +4434,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 		 * don't add it back to split_queue.
 		 */
 		if (!did_split && folio_test_partially_mapped(folio)) {
-			rcu_read_lock();
-			l = list_lru_lock_irqsave(&deferred_split_lru,
-						  folio_nid(folio),
-						  folio_memcg(folio),
-						  &flags);
+			l = folio_list_lru_lock_irqsave(folio,
+							&deferred_split_lru,
+							&flags);
 			__list_lru_add(&deferred_split_lru, l,
 				       &folio->_deferred_list,
 				       folio_nid(folio), folio_memcg(folio));
-			list_lru_unlock_irqrestore(l, &flags);
-			rcu_read_unlock();
+			folio_list_lru_unlock_irqrestore(folio, l, &flags);
 		}
 		folio_put(folio);
 	}
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 1ccdd45b1d14..8d50741ef18d 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -173,6 +173,35 @@ void list_lru_unlock_irqrestore(struct list_lru_one *l, unsigned long *flags)
 	unlock_list_lru(l, /*irq_off=*/true, /*irq_flags=*/flags);
 }
 
+struct list_lru_one *folio_list_lru_lock(struct folio *folio, struct list_lru *lru)
+{
+	rcu_read_lock();
+	return list_lru_lock(lru, folio_nid(folio), folio_memcg(folio));
+}
+
+void folio_list_lru_unlock(struct folio *folio, struct list_lru_one *l)
+{
+	list_lru_unlock(l);
+	rcu_read_unlock();
+}
+
+struct list_lru_one *folio_list_lru_lock_irqsave(struct folio *folio,
+						 struct list_lru *lru,
+						 unsigned long *flags)
+{
+	rcu_read_lock();
+	return list_lru_lock_irqsave(lru, folio_nid(folio),
+				     folio_memcg(folio), flags);
+}
+
+void folio_list_lru_unlock_irqrestore(struct folio *folio,
+				      struct list_lru_one *l,
+				      unsigned long *flags)
+{
+	list_lru_unlock_irqrestore(l, flags);
+	rcu_read_unlock();
+}
+
 bool __list_lru_add(struct list_lru *lru, struct list_lru_one *l,
 		    struct list_head *item, int nid,
 		    struct mem_cgroup *memcg)
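
One thing worth spelling out: the helpers bracket an RCU read-side
section, so folio_memcg() stays valid between lock and unlock, and
callers must not add their own rcu_read_lock()/rcu_read_unlock() pair
on top. A minimal caller would look like this (sketch, mirroring the
converted sites above):

	unsigned long flags;
	struct list_lru_one *l;

	l = folio_list_lru_lock_irqsave(folio, &deferred_split_lru, &flags);
	/* the list and folio_memcg() are stable in here */
	__list_lru_add(&deferred_split_lru, l, &folio->_deferred_list,
		       folio_nid(folio), folio_memcg(folio));
	folio_list_lru_unlock_irqrestore(folio, l, &flags);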
