From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH v2 07/26] swap: Turn get_swap_page() into folio_alloc_swap()
Date: Wed, 4 May 2022 19:28:38 +0100
Message-ID: <20220504182857.4013401-8-willy@infradead.org>
In-Reply-To: <20220504182857.4013401-1-willy@infradead.org>

Convert get_swap_page() into folio_alloc_swap(), taking a folio instead
of a page, and convert __mem_cgroup_try_charge_swap() to match.  This
removes an assumption that a large folio is HPAGE_PMD_NR pages in size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
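For reviewers skimming the diff, here is a condensed sketch of the
resulting entry point in mm/swap_slots.c, taken from the hunks below;
the per-cpu swap slot cache fast path between the large-folio check and
the out label is unchanged by this patch and elided here:

	swp_entry_t folio_alloc_swap(struct folio *folio)
	{
		swp_entry_t entry;

		entry.val = 0;
		if (folio_test_large(folio)) {
			/* Size the request from the folio, not HPAGE_PMD_NR */
			if (IS_ENABLED(CONFIG_THP_SWAP))
				get_swap_pages(1, &entry, folio_nr_pages(folio));
			goto out;
		}

		/* ... per-cpu swap slot cache fast path, unchanged ... */

	out:
		if (mem_cgroup_try_charge_swap(folio, entry)) {
			put_swap_page(&folio->page, entry);
			entry.val = 0;
		}
		return entry;
	}
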
 include/linux/swap.h | 13 +++++++------
 mm/memcontrol.c      | 16 ++++++++--------
 mm/shmem.c           |  3 ++-
 mm/swap_slots.c      | 14 +++++++-------
 mm/swap_state.c      |  3 ++-
 mm/swapfile.c        | 17 +++++++++--------
 6 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 27093b477c5f..147a9a173508 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -494,7 +494,7 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(struct page *page);
+swp_entry_t folio_alloc_swap(struct folio *folio);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
@@ -685,7 +685,7 @@ static inline int try_to_free_swap(struct page *page)
return 0;
}
-static inline swp_entry_t get_swap_page(struct page *page)
+static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
entry.val = 0;
@@ -739,12 +739,13 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
#ifdef CONFIG_MEMCG_SWAP
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
-extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
-static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
+ swp_entry_t entry)
{
if (mem_cgroup_disabled())
return 0;
- return __mem_cgroup_try_charge_swap(page, entry);
+ return __mem_cgroup_try_charge_swap(folio, entry);
}
extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
@@ -762,7 +763,7 @@ static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}
-static inline int mem_cgroup_try_charge_swap(struct page *page,
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 598fece89e2b..985eff804004 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7125,17 +7125,17 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
}
/**
- * __mem_cgroup_try_charge_swap - try charging swap space for a page
- * @page: page being added to swap
+ * __mem_cgroup_try_charge_swap - try charging swap space for a folio
+ * @folio: folio being added to swap
* @entry: swap entry to charge
*
- * Try to charge @page's memcg for the swap space at @entry.
+ * Try to charge @folio's memcg for the swap space at @entry.
*
* Returns 0 on success, -ENOMEM on failure.
*/
-int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
{
- unsigned int nr_pages = thp_nr_pages(page);
+ unsigned int nr_pages = folio_nr_pages(folio);
struct page_counter *counter;
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -7143,9 +7143,9 @@ int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
return 0;
- memcg = page_memcg(page);
+ memcg = folio_memcg(folio);
- VM_WARN_ON_ONCE_PAGE(!memcg, page);
+ VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
if (!memcg)
return 0;
@@ -7168,7 +7168,7 @@ int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
if (nr_pages > 1)
mem_cgroup_id_get_many(memcg, nr_pages - 1);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
- VM_BUG_ON_PAGE(oldid, page);
+ VM_BUG_ON_FOLIO(oldid, folio);
mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index c89394221a7e..85c23696efc6 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1312,6 +1312,7 @@ int shmem_unuse(unsigned int type)
*/
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
struct shmem_inode_info *info;
struct address_space *mapping;
struct inode *inode;
@@ -1385,7 +1386,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
SetPageUptodate(page);
}
- swap = get_swap_page(page);
+ swap = folio_alloc_swap(folio);
if (!swap.val)
goto redirty;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 2b5531840583..0218ec1cd24c 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -117,7 +117,7 @@ static int alloc_swap_slot_cache(unsigned int cpu)
/*
* Do allocation outside swap_slots_cache_mutex
- * as kvzalloc could trigger reclaim and get_swap_page,
+ * as kvzalloc could trigger reclaim and folio_alloc_swap,
* which can lock swap_slots_cache_mutex.
*/
slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
@@ -213,7 +213,7 @@ static void __drain_swap_slots_cache(unsigned int type)
* this function can be invoked in the cpu
* hot plug path:
* cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
- * -> memory allocation -> direct reclaim -> get_swap_page
+ * -> memory allocation -> direct reclaim -> folio_alloc_swap
* -> drain_swap_slots_cache
*
* Hence the loop over current online cpu below could miss cpu that
@@ -301,16 +301,16 @@ int free_swap_slot(swp_entry_t entry)
return 0;
}
-swp_entry_t get_swap_page(struct page *page)
+swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
struct swap_slots_cache *cache;
entry.val = 0;
- if (PageTransHuge(page)) {
+ if (folio_test_large(folio)) {
if (IS_ENABLED(CONFIG_THP_SWAP))
- get_swap_pages(1, &entry, HPAGE_PMD_NR);
+ get_swap_pages(1, &entry, folio_nr_pages(folio));
goto out;
}
@@ -344,8 +344,8 @@ swp_entry_t get_swap_page(struct page *page)
get_swap_pages(1, &entry, 1);
out:
- if (mem_cgroup_try_charge_swap(page, entry)) {
- put_swap_page(page, entry);
+ if (mem_cgroup_try_charge_swap(folio, entry)) {
+ put_swap_page(&folio->page, entry);
entry.val = 0;
}
return entry;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 013856004825..989ad18f5468 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -183,13 +183,14 @@ void __delete_from_swap_cache(struct page *page,
*/
int add_to_swap(struct page *page)
{
+ struct folio *folio = page_folio(page);
swp_entry_t entry;
int err;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageUptodate(page), page);
- entry = get_swap_page(page);
+ entry = folio_alloc_swap(folio);
if (!entry.val)
return 0;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 63c61f8b2611..c34f41553144 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -76,9 +76,9 @@ static PLIST_HEAD(swap_active_head);
/*
* all available (active, not full) swap_info_structs
* protected with swap_avail_lock, ordered by priority.
- * This is used by get_swap_page() instead of swap_active_head
+ * This is used by folio_alloc_swap() instead of swap_active_head
* because swap_active_head includes all swap_info_structs,
- * but get_swap_page() doesn't need to look at full ones.
+ * but folio_alloc_swap() doesn't need to look at full ones.
* This uses its own lock instead of swap_lock because when a
* swap_info_struct changes between not-full/full, it needs to
* add/remove itself to/from this list, but the swap_info_struct->lock
@@ -2093,11 +2093,12 @@ static int try_to_unuse(unsigned int type)
* Under global memory pressure, swap entries can be reinserted back
* into process space after the mmlist loop above passes over them.
*
- * Limit the number of retries? No: when mmget_not_zero() above fails,
- * that mm is likely to be freeing swap from exit_mmap(), which proceeds
- * at its own independent pace; and even shmem_writepage() could have
- * been preempted after get_swap_page(), temporarily hiding that swap.
- * It's easy and robust (though cpu-intensive) just to keep retrying.
+ * Limit the number of retries? No: when mmget_not_zero()
+ * above fails, that mm is likely to be freeing swap from
+ * exit_mmap(), which proceeds at its own independent pace;
+ * and even shmem_writepage() could have been preempted after
+ * folio_alloc_swap(), temporarily hiding that swap. It's easy
+ * and robust (though cpu-intensive) just to keep retrying.
*/
if (READ_ONCE(si->inuse_pages)) {
if (!signal_pending(current))
@@ -2310,7 +2311,7 @@ static void _enable_swap_info(struct swap_info_struct *p)
* which on removal of any swap_info_struct with an auto-assigned
* (i.e. negative) priority increments the auto-assigned priority
* of any lower-priority swap_info_structs.
- * swap_avail_head needs to be priority ordered for get_swap_page(),
+ * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
* which allocates swap pages from the highest available priority
* swap_info_struct.
*/
--
2.34.1