From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>, linux-mm@kvack.org
Subject: [PATCH v3 08/18] mm: use __page_cache_release() in folios_put()
Date: Tue, 27 Feb 2024 17:42:42 +0000
Message-ID: <20240227174254.710559-9-willy@infradead.org>
In-Reply-To: <20240227174254.710559-1-willy@infradead.org>
Pass a pointer to the lruvec so we can take advantage of
folio_lruvec_relock_irqsave(): when consecutive folios belong to the
same lruvec, the lock does not have to be dropped and retaken for each
one. Adjust the calling convention of folio_lruvec_relock_irqsave() to
suit and add a page_cache_release() wrapper for the single-folio
callers.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
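Not part of the patch: a rough sketch, modelled on the
folio_batch_move_lru() and folios_put_refs() hunks below, of what the
new calling convention looks like at a batching call site. The function
name is made up for illustration.

	/* Hypothetical example, not in the tree. */
	static void example_walk_batch(struct folio_batch *fbatch)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags;
		unsigned int i;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			/*
			 * Old convention:
			 *   lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
			 * New convention: the helper updates *lruvecp in place
			 * and only drops/retakes the lock when this folio
			 * belongs to a different lruvec than the one held.
			 */
			folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
			/* ... work on the folio under the lruvec lock ... */
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
	}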
 include/linux/memcontrol.h | 16 +++++-----
 mm/swap.c                  | 62 ++++++++++++++++++--------------------
2 files changed, 37 insertions(+), 41 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 46d9abb20761..8a0e8972a3d3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1705,18 +1705,18 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
return folio_lruvec_lock_irq(folio);
}
-/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
- struct lruvec *locked_lruvec, unsigned long *flags)
+/* Don't lock again iff folio's lruvec locked */
+static inline void folio_lruvec_relock_irqsave(struct folio *folio,
+ struct lruvec **lruvecp, unsigned long *flags)
{
- if (locked_lruvec) {
- if (folio_matches_lruvec(folio, locked_lruvec))
- return locked_lruvec;
+ if (*lruvecp) {
+ if (folio_matches_lruvec(folio, *lruvecp))
+ return;
- unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+ unlock_page_lruvec_irqrestore(*lruvecp, *flags);
}
- return folio_lruvec_lock_irqsave(folio, flags);
+ *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}
#ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/mm/swap.c b/mm/swap.c
index ad3f2e9448a4..dce5ea67ae05 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -74,22 +74,21 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
};
-/*
- * This path almost never happens for VM activity - pages are normally freed
- * in batches. But it gets used by networking - and for compound pages.
- */
-static void __page_cache_release(struct folio *folio)
+static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
+ unsigned long *flagsp)
{
if (folio_test_lru(folio)) {
- struct lruvec *lruvec;
- unsigned long flags;
-
- lruvec = folio_lruvec_lock_irqsave(folio, &flags);
- lruvec_del_folio(lruvec, folio);
+ folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
+ lruvec_del_folio(*lruvecp, folio);
__folio_clear_lru_flags(folio);
- unlock_page_lruvec_irqrestore(lruvec, flags);
}
- /* See comment on folio_test_mlocked in folios_put() */
+
+ /*
+ * In rare cases, when truncation or holepunching raced with
+ * munlock after VM_LOCKED was cleared, Mlocked may still be
+ * found set here. This does not indicate a problem, unless
+ * "unevictable_pgs_cleared" appears worryingly large.
+ */
if (unlikely(folio_test_mlocked(folio))) {
long nr_pages = folio_nr_pages(folio);
@@ -99,9 +98,23 @@ static void __page_cache_release(struct folio *folio)
}
}
+/*
+ * This path almost never happens for VM activity - pages are normally freed
+ * in batches. But it gets used by networking - and for compound pages.
+ */
+static void page_cache_release(struct folio *folio)
+{
+ struct lruvec *lruvec = NULL;
+ unsigned long flags;
+
+ __page_cache_release(folio, &lruvec, &flags);
+ if (lruvec)
+ unlock_page_lruvec_irqrestore(lruvec, flags);
+}
+
static void __folio_put_small(struct folio *folio)
{
- __page_cache_release(folio);
+ page_cache_release(folio);
mem_cgroup_uncharge(folio);
free_unref_page(&folio->page, 0);
}
@@ -115,7 +128,7 @@ static void __folio_put_large(struct folio *folio)
* be called for hugetlb (it has a separate hugetlb_cgroup.)
*/
if (!folio_test_hugetlb(folio))
- __page_cache_release(folio);
+ page_cache_release(folio);
destroy_large_folio(folio);
}
@@ -216,7 +229,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
continue;
- lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+ folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
move_fn(lruvec, folio);
folio_set_lru(folio);
@@ -999,24 +1012,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
continue;
}
- if (folio_test_lru(folio)) {
- lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
- &flags);
- lruvec_del_folio(lruvec, folio);
- __folio_clear_lru_flags(folio);
- }
-
- /*
- * In rare cases, when truncation or holepunching raced with
- * munlock after VM_LOCKED was cleared, Mlocked may still be
- * found set here. This does not indicate a problem, unless
- * "unevictable_pgs_cleared" appears worryingly large.
- */
- if (unlikely(folio_test_mlocked(folio))) {
- __folio_clear_mlocked(folio);
- zone_stat_sub_folio(folio, NR_MLOCK);
- count_vm_event(UNEVICTABLE_PGCLEARED);
- }
+ __page_cache_release(folio, &lruvec, &flags);
if (j != i)
folios->folios[j] = folio;
--
2.43.0
Thread overview: 73+ messages
2024-02-27 17:42 [PATCH v3 00/18] Rearrange batched folio freeing Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 01/18] mm: Make folios_put() the basis of release_pages() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 02/18] mm: Convert free_unref_page_list() to use folios Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 03/18] mm: Add free_unref_folios() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 04/18] mm: Use folios_put() in __folio_batch_release() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 05/18] memcg: Add mem_cgroup_uncharge_folios() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 06/18] mm: Remove use of folio list from folios_put() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 07/18] mm: Use free_unref_folios() in put_pages_list() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` Matthew Wilcox (Oracle) [this message]
2024-02-27 17:42 ` [PATCH v3 09/18] mm: Handle large folios in free_unref_folios() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 10/18] mm: Allow non-hugetlb large folios to be batch processed Matthew Wilcox (Oracle)
2024-03-06 13:42 ` Ryan Roberts
2024-03-06 16:09 ` Matthew Wilcox
2024-03-06 16:19 ` Ryan Roberts
2024-03-06 17:41 ` Ryan Roberts
2024-03-06 18:41 ` Zi Yan
2024-03-06 19:55 ` Matthew Wilcox
2024-03-06 21:55 ` Matthew Wilcox
2024-03-07 8:56 ` Ryan Roberts
2024-03-07 13:50 ` Yin, Fengwei
2024-03-07 14:05 ` Re: Matthew Wilcox
2024-03-07 15:24 ` Re: Ryan Roberts
2024-03-07 16:24 ` Re: Ryan Roberts
2024-03-07 23:02 ` Re: Matthew Wilcox
2024-03-08 1:06 ` Re: Yin, Fengwei
2024-03-07 17:33 ` [PATCH v3 10/18] mm: Allow non-hugetlb large folios to be batch processed Matthew Wilcox
2024-03-07 18:35 ` Ryan Roberts
2024-03-07 20:42 ` Matthew Wilcox
2024-03-08 11:44 ` Ryan Roberts
2024-03-08 12:09 ` Ryan Roberts
2024-03-08 14:21 ` Ryan Roberts
2024-03-08 15:11 ` Matthew Wilcox
2024-03-08 16:03 ` Matthew Wilcox
2024-03-08 17:13 ` Ryan Roberts
2024-03-08 18:09 ` Ryan Roberts
2024-03-08 18:18 ` Matthew Wilcox
2024-03-09 4:34 ` Andrew Morton
2024-03-09 4:52 ` Matthew Wilcox
2024-03-09 8:05 ` Ryan Roberts
2024-03-09 12:33 ` Ryan Roberts
2024-03-10 13:38 ` Matthew Wilcox
2024-03-08 15:33 ` Matthew Wilcox
2024-03-09 6:09 ` Matthew Wilcox
2024-03-09 7:59 ` Ryan Roberts
2024-03-09 8:18 ` Ryan Roberts
2024-03-09 9:38 ` Ryan Roberts
2024-03-10 4:23 ` Matthew Wilcox
2024-03-10 8:23 ` Ryan Roberts
2024-03-10 11:08 ` Matthew Wilcox
2024-03-10 11:01 ` Ryan Roberts
2024-03-10 11:11 ` Matthew Wilcox
2024-03-10 16:31 ` Ryan Roberts
2024-03-10 19:57 ` Matthew Wilcox
2024-03-10 19:59 ` Ryan Roberts
2024-03-10 20:46 ` Matthew Wilcox
2024-03-10 21:52 ` Matthew Wilcox
2024-03-11 9:01 ` Ryan Roberts
2024-03-11 12:26 ` Matthew Wilcox
2024-03-11 12:36 ` Ryan Roberts
2024-03-11 15:50 ` Matthew Wilcox
2024-03-11 16:14 ` Ryan Roberts
2024-03-11 17:49 ` Matthew Wilcox
2024-03-12 11:57 ` Ryan Roberts
2024-03-11 19:26 ` Matthew Wilcox
2024-03-10 11:14 ` Ryan Roberts
2024-02-27 17:42 ` [PATCH v3 11/18] mm: Free folios in a batch in shrink_folio_list() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 12/18] mm: Free folios directly in move_folios_to_lru() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 13/18] memcg: Remove mem_cgroup_uncharge_list() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 14/18] mm: Remove free_unref_page_list() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 15/18] mm: Remove lru_to_page() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 16/18] mm: Convert free_pages_and_swap_cache() to use folios_put() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 17/18] mm: Use a folio in __collapse_huge_page_copy_succeeded() Matthew Wilcox (Oracle)
2024-02-27 17:42 ` [PATCH v3 18/18] mm: Convert free_swap_cache() to take a folio Matthew Wilcox (Oracle)