* [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations
[not found] <20260414091527.2970844-1-ye.liu@linux.dev>
@ 2026-04-14 9:15 ` Ye Liu
2026-04-14 17:52 ` David Hildenbrand (Arm)
0 siblings, 1 reply; 3+ messages in thread
From: Ye Liu @ 2026-04-14 9:15 UTC (permalink / raw)
To: Trond Myklebust, Anna Schumaker, Andrew Morton, Vlastimil Babka,
David Hildenbrand, Lorenzo Stoakes, Matthew Wilcox (Oracle),
Chris Li, Kairui Song
Cc: Ye Liu, Suren Baghdasaryan, Michal Hocko, Brendan Jackman,
Johannes Weiner, Zi Yan, Jason Gunthorpe, John Hubbard, Peter Xu,
Baolin Wang, Liam R. Howlett, Nico Pache, Ryan Roberts, Dev Jain,
Barry Song, Lance Yang, Matthew Brost, Joshua Hahn, Rakie Kim,
Byungchul Park, Gregory Price, Ying Huang, Alistair Popple,
Kemeng Shi, Nhat Pham, Baoquan He, Youngjun Park, linux-nfs,
linux-kernel, linux-mm, linux-fsdevel
From: Ye Liu <liuye@kylinos.cn>
Replace node_stat_mod_folio() calls that pass folio_nr_pages(folio) or
-folio_nr_pages(folio) as the third argument with the more concise
node_stat_add_folio() and node_stat_sub_folio() functions respectively.
This makes the code more readable and reduces the number of arguments
passed to these functions.
Signed-off-by: Ye Liu <liuye@kylinos.cn>
---
fs/nfs/internal.h | 2 +-
fs/nfs/write.c | 2 +-
mm/compaction.c | 5 ++---
mm/gup.c | 5 ++---
mm/khugepaged.c | 10 ++++------
mm/mempolicy.c | 5 ++---
mm/migrate.c | 12 +++++-------
mm/page-writeback.c | 4 ++--
mm/swap_state.c | 4 ++--
9 files changed, 21 insertions(+), 28 deletions(-)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index fc5456377160..f5c52a2d2a1f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -858,7 +858,7 @@ static inline void nfs_folio_mark_unstable(struct folio *folio,
/* This page is really still in write-back - just that the
* writeback is happening on the server now.
*/
- node_stat_mod_folio(folio, NR_WRITEBACK, nr);
+ node_stat_add_folio(folio, NR_WRITEBACK);
bdi_wb_stat_mod(inode, WB_WRITEBACK, nr);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index cc02b57de3c7..a8700824a61b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -871,7 +871,7 @@ static void nfs_folio_clear_commit(struct folio *folio)
if (folio) {
long nr = folio_nr_pages(folio);
- node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+ node_stat_sub_folio(folio, NR_WRITEBACK);
bdi_wb_stat_mod(folio->mapping->host, WB_WRITEBACK, -nr);
}
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 3648ce22c807..d7ce622aeed1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1215,9 +1215,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Successfully isolated */
lruvec_del_folio(lruvec, folio);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
isolate_success:
list_add(&folio->lru, &cc->migratepages);
diff --git a/mm/gup.c b/mm/gup.c
index ad9ded39609c..2cb2efa20bff 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2304,9 +2304,8 @@ static unsigned long collect_longterm_unpinnable_folios(
continue;
list_add_tail(&folio->lru, movable_folio_list);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
}
return collected;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b8452dbdb043..f662de753305 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -502,9 +502,8 @@ void __khugepaged_exit(struct mm_struct *mm)
static void release_pte_folio(struct folio *folio)
{
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- -folio_nr_pages(folio));
+ node_stat_sub_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
folio_unlock(folio);
folio_putback_lru(folio);
}
@@ -650,9 +649,8 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
result = SCAN_DEL_PAGE_LRU;
goto out;
}
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4e4421b22b59..1c413f66b35f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1259,9 +1259,8 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) {
if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, foliolist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
} else {
/*
* Non-movable folio may reach here. And, there may be
diff --git a/mm/migrate.c b/mm/migrate.c
index 8a64291ab5b4..dc8cfee37a70 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -268,8 +268,8 @@ void putback_movable_pages(struct list_head *l)
if (unlikely(page_has_movable_ops(&folio->page))) {
putback_movable_ops_page(&folio->page);
} else {
- node_stat_mod_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio), -folio_nr_pages(folio));
+ node_stat_sub_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
folio_putback_lru(folio);
}
}
@@ -2272,9 +2272,8 @@ static int __add_folio_for_migration(struct folio *folio, int node,
return 1;
} else if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, pagelist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
return 1;
}
return -EBUSY;
@@ -2726,8 +2725,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
if (!folio_isolate_lru(folio))
return -EAGAIN;
- node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
- nr_pages);
+ node_stat_add_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio));
return 0;
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 833f743f309f..87e9ea41313a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2750,7 +2750,7 @@ bool folio_redirty_for_writepage(struct writeback_control *wbc,
wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied -= nr;
- node_stat_mod_folio(folio, NR_DIRTIED, -nr);
+ node_stat_sub_folio(folio, NR_DIRTIED);
wb_stat_mod(wb, WB_DIRTIED, -nr);
unlocked_inode_to_wb_end(inode, &cookie);
}
@@ -2981,7 +2981,7 @@ bool __folio_end_writeback(struct folio *folio)
lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
- node_stat_mod_folio(folio, NR_WRITTEN, nr);
+ node_stat_add_folio(folio, NR_WRITTEN);
return ret;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1415a5c54a43..d08e923c9979 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -160,7 +160,7 @@ void __swap_cache_add_folio(struct swap_cluster_info *ci,
folio_set_swapcache(folio);
folio->swap = entry;
- node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+ node_stat_add_folio(folio, NR_FILE_PAGES);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages);
}
@@ -265,7 +265,7 @@ void __swap_cache_del_folio(struct swap_cluster_info *ci, struct folio *folio,
folio->swap.val = 0;
folio_clear_swapcache(folio);
- node_stat_mod_folio(folio, NR_FILE_PAGES, -nr_pages);
+ node_stat_sub_folio(folio, NR_FILE_PAGES);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr_pages);
if (!folio_swapped) {
--
2.43.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations
2026-04-14 9:15 ` [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations Ye Liu
@ 2026-04-14 17:52 ` David Hildenbrand (Arm)
2026-04-15 0:48 ` Ye Liu
0 siblings, 1 reply; 3+ messages in thread
From: David Hildenbrand (Arm) @ 2026-04-14 17:52 UTC (permalink / raw)
To: Ye Liu, Trond Myklebust, Anna Schumaker, Andrew Morton,
Vlastimil Babka, Lorenzo Stoakes, Matthew Wilcox (Oracle),
Chris Li, Kairui Song
Cc: Ye Liu, Suren Baghdasaryan, Michal Hocko, Brendan Jackman,
Johannes Weiner, Zi Yan, Jason Gunthorpe, John Hubbard, Peter Xu,
Baolin Wang, Liam R. Howlett, Nico Pache, Ryan Roberts, Dev Jain,
Barry Song, Lance Yang, Matthew Brost, Joshua Hahn, Rakie Kim,
Byungchul Park, Gregory Price, Ying Huang, Alistair Popple,
Kemeng Shi, Nhat Pham, Baoquan He, Youngjun Park, linux-nfs,
linux-kernel, linux-mm, linux-fsdevel
On 4/14/26 11:15, Ye Liu wrote:
> From: Ye Liu <liuye@kylinos.cn>
>
> Replace node_stat_mod_folio() calls that pass folio_nr_pages(folio) or
> -folio_nr_pages(folio) as the third argument with the more concise
> node_stat_add_folio() and node_stat_sub_folio() functions respectively.
>
> This makes the code more readable and reduces the number of arguments
> passed to these functions.
Also, that makes it clearer that we always account the full folio, never
parts of it.
--
Cheers,
David
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations
2026-04-14 17:52 ` David Hildenbrand (Arm)
@ 2026-04-15 0:48 ` Ye Liu
0 siblings, 0 replies; 3+ messages in thread
From: Ye Liu @ 2026-04-15 0:48 UTC (permalink / raw)
To: David Hildenbrand (Arm), Trond Myklebust, Anna Schumaker,
Andrew Morton, Vlastimil Babka, Lorenzo Stoakes,
Matthew Wilcox (Oracle), Chris Li, Kairui Song
Cc: Ye Liu, Suren Baghdasaryan, Michal Hocko, Brendan Jackman,
Johannes Weiner, Zi Yan, Jason Gunthorpe, John Hubbard, Peter Xu,
Baolin Wang, Liam R. Howlett, Nico Pache, Ryan Roberts, Dev Jain,
Barry Song, Lance Yang, Matthew Brost, Joshua Hahn, Rakie Kim,
Byungchul Park, Gregory Price, Ying Huang, Alistair Popple,
Kemeng Shi, Nhat Pham, Baoquan He, Youngjun Park, linux-nfs,
linux-kernel, linux-mm, linux-fsdevel
在 2026/4/15 01:52, David Hildenbrand (Arm) 写道:
> On 4/14/26 11:15, Ye Liu wrote:
>> From: Ye Liu <liuye@kylinos.cn>
>>
>> Replace node_stat_mod_folio() calls that pass folio_nr_pages(folio) or
>> -folio_nr_pages(folio) as the third argument with the more concise
>> node_stat_add_folio() and node_stat_sub_folio() functions respectively.
>>
>> This makes the code more readable and reduces the number of arguments
>> passed to these functions.
>
> Also, that makes it clearer that we always account the full folio, never
> parts of it.
>
Thank you, David, for your support. I agree that the new functions make
the intent more explicit.
--
Thanks,
Ye Liu
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2026-04-15 0:48 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <20260414091527.2970844-1-ye.liu@linux.dev>
2026-04-14 9:15 ` [PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations Ye Liu
2026-04-14 17:52 ` David Hildenbrand (Arm)
2026-04-15 0:48 ` Ye Liu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox