* [PATCH v4 2/2] mm/page_io: rename swap_iocb->pages to swap_iocb->bvecs
2026-04-02 6:14 [PATCH v4 1/2] mm/page_io: use sio->len for PSWPIN accounting in sio_read_complete() David Carlier
@ 2026-04-02 6:14 ` David Carlier
2026-04-02 7:15 ` David Hildenbrand (Arm)
2026-04-02 7:12 ` [PATCH v4 1/2] mm/page_io: use sio->len for PSWPIN accounting in sio_read_complete() David Hildenbrand (Arm)
1 sibling, 1 reply; 5+ messages in thread
From: David Carlier @ 2026-04-02 6:14 UTC (permalink / raw)
To: David Hildenbrand, Kairui Song, Chris Li, Andrew Morton,
Kemeng Shi, Nhat Pham, Baoquan He, Youngjun Park, NeilBrown
Cc: linux-kernel, linux-mm, David Carlier, Matthew Wilcox (Oracle)
swap_iocb->pages tracks the number of bvec entries (folios), not base
pages. Rename it to bvecs to accurately reflect its purpose.
Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Carlier <devnexen@gmail.com>
---
mm/page_io.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/mm/page_io.c b/mm/page_io.c
index 1389cd57ca88..e524cb821d04 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -327,7 +327,7 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
struct swap_iocb {
struct kiocb iocb;
struct bio_vec bvec[SWAP_CLUSTER_MAX];
- int pages;
+ int bvecs;
int len;
};
static mempool_t *sio_pool;
@@ -362,14 +362,14 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
*/
pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
ret, swap_dev_pos(page_swap_entry(page)));
- for (p = 0; p < sio->pages; p++) {
+ for (p = 0; p < sio->bvecs; p++) {
page = sio->bvec[p].bv_page;
set_page_dirty(page);
ClearPageReclaim(page);
}
}
- for (p = 0; p < sio->pages; p++)
+ for (p = 0; p < sio->bvecs; p++)
end_page_writeback(sio->bvec[p].bv_page);
mempool_free(sio, sio_pool);
@@ -397,13 +397,13 @@ static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
init_sync_kiocb(&sio->iocb, swap_file);
sio->iocb.ki_complete = sio_write_complete;
sio->iocb.ki_pos = pos;
- sio->pages = 0;
+ sio->bvecs = 0;
sio->len = 0;
}
- bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ bvec_set_folio(&sio->bvec[sio->bvecs], folio, folio_size(folio), 0);
sio->len += folio_size(folio);
- sio->pages += 1;
- if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
+ sio->bvecs += 1;
+ if (sio->bvecs == ARRAY_SIZE(sio->bvec) || !swap_plug) {
swap_write_unplug(sio);
sio = NULL;
}
@@ -477,7 +477,7 @@ void swap_write_unplug(struct swap_iocb *sio)
struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
int ret;
- iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
+ iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->bvecs, sio->len);
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
if (ret != -EIOCBQUEUED)
sio_write_complete(&sio->iocb, ret);
@@ -489,7 +489,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
int p;
if (ret == sio->len) {
- for (p = 0; p < sio->pages; p++) {
+ for (p = 0; p < sio->bvecs; p++) {
struct folio *folio = page_folio(sio->bvec[p].bv_page);
count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
@@ -499,7 +499,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
}
count_vm_events(PSWPIN, sio->len >> PAGE_SHIFT);
} else {
- for (p = 0; p < sio->pages; p++) {
+ for (p = 0; p < sio->bvecs; p++) {
struct folio *folio = page_folio(sio->bvec[p].bv_page);
folio_unlock(folio);
@@ -559,13 +559,13 @@ static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
init_sync_kiocb(&sio->iocb, sis->swap_file);
sio->iocb.ki_pos = pos;
sio->iocb.ki_complete = sio_read_complete;
- sio->pages = 0;
+ sio->bvecs = 0;
sio->len = 0;
}
- bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+ bvec_set_folio(&sio->bvec[sio->bvecs], folio, folio_size(folio), 0);
sio->len += folio_size(folio);
- sio->pages += 1;
- if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
+ sio->bvecs += 1;
+ if (sio->bvecs == ARRAY_SIZE(sio->bvec) || !plug) {
swap_read_unplug(sio);
sio = NULL;
}
@@ -666,7 +666,7 @@ void __swap_read_unplug(struct swap_iocb *sio)
struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
int ret;
- iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
+ iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->bvecs, sio->len);
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
if (ret != -EIOCBQUEUED)
sio_read_complete(&sio->iocb, ret);
--
2.53.0
^ permalink raw reply related [flat|nested] 5+ messages in thread

* Re: [PATCH v4 1/2] mm/page_io: use sio->len for PSWPIN accounting in sio_read_complete()
2026-04-02 6:14 [PATCH v4 1/2] mm/page_io: use sio->len for PSWPIN accounting in sio_read_complete() David Carlier
2026-04-02 6:14 ` [PATCH v4 2/2] mm/page_io: rename swap_iocb->pages to swap_iocb->bvecs David Carlier
@ 2026-04-02 7:12 ` David Hildenbrand (Arm)
1 sibling, 0 replies; 5+ messages in thread
From: David Hildenbrand (Arm) @ 2026-04-02 7:12 UTC (permalink / raw)
To: David Carlier, Kairui Song, Chris Li, Andrew Morton, Kemeng Shi,
Nhat Pham, Baoquan He, Youngjun Park, NeilBrown
Cc: linux-kernel, linux-mm
On 4/2/26 08:14, David Carlier wrote:
> sio_read_complete() uses sio->pages to account global PSWPIN vm events,
> but sio->pages tracks the number of bvec entries (folios), not base
> pages.
>
> While large folios cannot currently reach this path (SWP_FS_OPS and
> SWP_SYNCHRONOUS_IO are mutually exclusive, and mTHP swap-in allocation
> is gated on SWP_SYNCHRONOUS_IO), the accounting is semantically
> inconsistent with the per-memcg path which correctly uses
> folio_nr_pages().
>
> Use sio->len >> PAGE_SHIFT instead, which gives the correct base page
> count since sio->len is accumulated via folio_size(folio).
>
> Signed-off-by: David Carlier <devnexen@gmail.com>
> ---
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
--
Cheers,
David
^ permalink raw reply [flat|nested] 5+ messages in thread