From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org, cgroups@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Vladimir Davydov <vdavydov.dev@gmail.com>
Subject: [PATCH v3 11/18] mm/memcg: Convert mem_cgroup_migrate() to take folios
Date: Wed, 30 Jun 2021 05:00:27 +0100 [thread overview]
Message-ID: <20210630040034.1155892-12-willy@infradead.org> (raw)
In-Reply-To: <20210630040034.1155892-1-willy@infradead.org>
Convert all callers of mem_cgroup_migrate() to call page_folio() first.
They all look like they're using head pages already, but passing folios
proves it at the type level and removes the compound_head() hidden call.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/memcontrol.h | 4 ++--
mm/filemap.c | 4 +++-
mm/memcontrol.c | 35 +++++++++++++++++------------------
mm/migrate.c | 4 +++-
mm/shmem.c | 5 ++++-
5 files changed, 29 insertions(+), 23 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d6386a2b9d7a..2c57a405acd2 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -713,7 +713,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
void mem_cgroup_uncharge(struct folio *);
void mem_cgroup_uncharge_list(struct list_head *page_list);
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
/**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
@@ -1210,7 +1210,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 0008ada132c4..964f1643dd97 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -817,6 +817,8 @@ EXPORT_SYMBOL(file_write_and_wait_range);
*/
void replace_page_cache_page(struct page *old, struct page *new)
{
+ struct folio *fold = page_folio(old);
+ struct folio *fnew = page_folio(new);
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *) = mapping->a_ops->freepage;
pgoff_t offset = old->index;
@@ -831,7 +833,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
new->mapping = mapping;
new->index = offset;
- mem_cgroup_migrate(old, new);
+ mem_cgroup_migrate(fold, fnew);
xas_lock_irqsave(&xas, flags);
xas_store(&xas, new);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 90a53f554371..4ce2f2eb81d8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6936,36 +6936,35 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
}
/**
- * mem_cgroup_migrate - charge a page's replacement
- * @oldpage: currently circulating page
- * @newpage: replacement page
+ * mem_cgroup_migrate - Charge a folio's replacement.
+ * @old: Currently circulating folio.
+ * @new: Replacement folio.
*
- * Charge @newpage as a replacement page for @oldpage. @oldpage will
+ * Charge @new as a replacement folio for @old. @old will
* be uncharged upon free.
*
- * Both pages must be locked, @newpage->mapping must be set up.
+ * Both folios must be locked, @new->mapping must be set up.
*/
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
- struct folio *newfolio = page_folio(newpage);
struct mem_cgroup *memcg;
- unsigned int nr_pages = folio_nr_pages(newfolio);
+ unsigned int nr_pages = folio_nr_pages(new);
unsigned long flags;
- VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
- VM_BUG_ON_FOLIO(!folio_locked(newfolio), newfolio);
- VM_BUG_ON_FOLIO(PageAnon(oldpage) != folio_anon(newfolio), newfolio);
- VM_BUG_ON_FOLIO(compound_nr(oldpage) != nr_pages, newfolio);
+ VM_BUG_ON_FOLIO(!folio_locked(old), old);
+ VM_BUG_ON_FOLIO(!folio_locked(new), new);
+ VM_BUG_ON_FOLIO(folio_anon(old) != folio_anon(new), new);
+ VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
if (mem_cgroup_disabled())
return;
- /* Page cache replacement: new page already charged? */
- if (folio_memcg(newfolio))
+ /* Page cache replacement: new folio already charged? */
+ if (folio_memcg(new))
return;
- memcg = page_memcg(oldpage);
- VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
+ memcg = folio_memcg(old);
+ VM_WARN_ON_ONCE_FOLIO(!memcg, old);
if (!memcg)
return;
@@ -6977,11 +6976,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
}
css_get(&memcg->css);
- commit_charge(newfolio, memcg);
+ commit_charge(new, memcg);
local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, nr_pages);
- memcg_check_events(memcg, page_to_nid(newpage));
+ memcg_check_events(memcg, folio_nid(new));
local_irq_restore(flags);
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 94efe09bb2a0..f71e72f9c812 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -582,6 +582,8 @@ static void copy_huge_page(struct page *dst, struct page *src)
*/
void migrate_page_states(struct page *newpage, struct page *page)
{
+ struct folio *folio = page_folio(page);
+ struct folio *newfolio = page_folio(newpage);
int cpupid;
if (PageError(page))
@@ -646,7 +648,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
copy_page_owner(page, newpage);
if (!PageHuge(page))
- mem_cgroup_migrate(page, newpage);
+ mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(migrate_page_states);
diff --git a/mm/shmem.c b/mm/shmem.c
index 3cc5ddd5cc6b..1e172fb40fb3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1619,6 +1619,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct page *oldpage, *newpage;
+ struct folio *old, *new;
struct address_space *swap_mapping;
swp_entry_t entry;
pgoff_t swap_index;
@@ -1655,7 +1656,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
xa_lock_irq(&swap_mapping->i_pages);
error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
if (!error) {
- mem_cgroup_migrate(oldpage, newpage);
+ old = page_folio(oldpage);
+ new = page_folio(newpage);
+ mem_cgroup_migrate(old, new);
__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
}
--
2.30.2
next prev parent reply other threads:[~2021-06-30 4:08 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-06-30 4:00 [PATCH v3 00/18] Folio conversion of memcg Matthew Wilcox (Oracle)
2021-06-30 4:00 ` [PATCH v3 01/18] mm: Add folio_nid() Matthew Wilcox (Oracle)
2021-07-01 6:56 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 02/18] mm/memcg: Remove 'page' parameter to mem_cgroup_charge_statistics() Matthew Wilcox (Oracle)
2021-06-30 14:17 ` Johannes Weiner
2021-06-30 4:00 ` [PATCH v3 03/18] mm/memcg: Use the node id in mem_cgroup_update_tree() Matthew Wilcox (Oracle)
2021-06-30 6:55 ` Michal Hocko
2021-06-30 14:18 ` Johannes Weiner
2021-07-01 6:57 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 04/18] mm/memcg: Remove soft_limit_tree_node() Matthew Wilcox (Oracle)
2021-06-30 6:56 ` Michal Hocko
2021-06-30 14:19 ` Johannes Weiner
2021-07-01 7:09 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 05/18] mm/memcg: Convert memcg_check_events to take a node ID Matthew Wilcox (Oracle)
2021-06-30 6:58 ` Michal Hocko
2021-06-30 6:59 ` Michal Hocko
2021-07-01 7:09 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 06/18] mm/memcg: Add folio_memcg() and related functions Matthew Wilcox (Oracle)
2021-06-30 6:53 ` kernel test robot
2021-07-01 7:12 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 07/18] mm/memcg: Convert commit_charge() to take a folio Matthew Wilcox (Oracle)
2021-06-30 4:00 ` [PATCH v3 08/18] mm/memcg: Convert mem_cgroup_charge() " Matthew Wilcox (Oracle)
2021-06-30 7:17 ` kernel test robot
2021-07-01 7:13 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 09/18] mm/memcg: Convert uncharge_page() to uncharge_folio() Matthew Wilcox (Oracle)
2021-07-01 7:15 ` Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 10/18] mm/memcg: Convert mem_cgroup_uncharge() to take a folio Matthew Wilcox (Oracle)
2021-06-30 8:46 ` kernel test robot
2021-07-01 7:17 ` Christoph Hellwig
2021-07-07 12:09 ` Matthew Wilcox
2021-06-30 4:00 ` Matthew Wilcox (Oracle) [this message]
2021-07-01 7:20 ` [PATCH v3 11/18] mm/memcg: Convert mem_cgroup_migrate() to take folios Christoph Hellwig
2021-06-30 4:00 ` [PATCH v3 12/18] mm/memcg: Convert mem_cgroup_track_foreign_dirty_slowpath() to folio Matthew Wilcox (Oracle)
2021-06-30 4:00 ` [PATCH v3 13/18] mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock() Matthew Wilcox (Oracle)
2021-06-30 8:32 ` Michal Hocko
2021-07-07 15:10 ` Matthew Wilcox
2021-07-08 7:28 ` Michal Hocko
2021-07-07 17:08 ` Johannes Weiner
2021-07-07 19:28 ` Matthew Wilcox
2021-07-07 20:41 ` Johannes Weiner
2021-07-09 19:37 ` Matthew Wilcox
2021-06-30 4:00 ` [PATCH v3 14/18] mm/memcg: Convert mem_cgroup_move_account() to use a folio Matthew Wilcox (Oracle)
2021-06-30 8:30 ` Michal Hocko
2021-06-30 11:22 ` Matthew Wilcox
2021-06-30 12:20 ` Michal Hocko
2021-06-30 12:31 ` Matthew Wilcox
2021-06-30 12:45 ` Michal Hocko
2021-07-07 15:25 ` Matthew Wilcox
2021-07-08 7:30 ` Michal Hocko
2021-06-30 4:00 ` [PATCH v3 15/18] mm/memcg: Add mem_cgroup_folio_lruvec() Matthew Wilcox (Oracle)
2021-06-30 8:12 ` kernel test robot
2021-06-30 19:18 ` Matthew Wilcox
2021-06-30 21:21 ` Johannes Weiner
2021-06-30 4:00 ` [PATCH v3 16/18] mm/memcg: Add folio_lruvec_lock() and similar functions Matthew Wilcox (Oracle)
2021-06-30 8:36 ` Michal Hocko
2021-06-30 4:00 ` [PATCH v3 17/18] mm/memcg: Add folio_lruvec_relock_irq() and folio_lruvec_relock_irqsave() Matthew Wilcox (Oracle)
2021-06-30 8:39 ` Michal Hocko
2021-06-30 4:00 ` [PATCH v3 18/18] mm/workingset: Convert workingset_activation to take a folio Matthew Wilcox (Oracle)
2021-06-30 8:44 ` [PATCH v3 00/18] Folio conversion of memcg Michal Hocko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210630040034.1155892-12-willy@infradead.org \
--to=willy@infradead.org \
--cc=cgroups@vger.kernel.org \
--cc=hannes@cmpxchg.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@kernel.org \
--cc=vdavydov.dev@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).