public inbox for linux-mm@kvack.org
 help / color / mirror / Atom feed
* [PATCH v4] mm/page_io: rename swap_iocb->pages to swap_iocb->bvecs
@ 2026-04-02  6:12 David Carlier
  2026-04-02  6:17 ` David CARLIER
  0 siblings, 1 reply; 2+ messages in thread
From: David Carlier @ 2026-04-02  6:12 UTC (permalink / raw)
  To: David Hildenbrand, Kairui Song, Chris Li, Andrew Morton,
	Kemeng Shi, Nhat Pham, Baoquan He, Youngjun Park, NeilBrown
  Cc: linux-kernel, linux-mm, David Carlier, Matthew Wilcox (Oracle)

swap_iocb->pages tracks the number of bvec entries (folios), not base
pages. Rename it to bvecs to accurately reflect its purpose.

Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Carlier <devnexen@gmail.com>
---
v4: renamed swap_iocb->pages to ->bvecs (Willy)
v3: use sio->len for PSWPIN accounting
v2: convert sio_write_complete() to folio APIs
 mm/page_io.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 1389cd57ca88..e524cb821d04 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -327,7 +327,7 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
 struct swap_iocb {
 	struct kiocb		iocb;
 	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
-	int			pages;
+	int			bvecs;
 	int			len;
 };
 static mempool_t *sio_pool;
@@ -362,14 +362,14 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
 		 */
 		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
 				   ret, swap_dev_pos(page_swap_entry(page)));
-		for (p = 0; p < sio->pages; p++) {
+		for (p = 0; p < sio->bvecs; p++) {
 			page = sio->bvec[p].bv_page;
 			set_page_dirty(page);
 			ClearPageReclaim(page);
 		}
 	}
 
-	for (p = 0; p < sio->pages; p++)
+	for (p = 0; p < sio->bvecs; p++)
 		end_page_writeback(sio->bvec[p].bv_page);
 
 	mempool_free(sio, sio_pool);
@@ -397,13 +397,13 @@ static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug)
 		init_sync_kiocb(&sio->iocb, swap_file);
 		sio->iocb.ki_complete = sio_write_complete;
 		sio->iocb.ki_pos = pos;
-		sio->pages = 0;
+		sio->bvecs = 0;
 		sio->len = 0;
 	}
-	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+	bvec_set_folio(&sio->bvec[sio->bvecs], folio, folio_size(folio), 0);
 	sio->len += folio_size(folio);
-	sio->pages += 1;
-	if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) {
+	sio->bvecs += 1;
+	if (sio->bvecs == ARRAY_SIZE(sio->bvec) || !swap_plug) {
 		swap_write_unplug(sio);
 		sio = NULL;
 	}
@@ -477,7 +477,7 @@ void swap_write_unplug(struct swap_iocb *sio)
 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
 	int ret;
 
-	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
+	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->bvecs, sio->len);
 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
 	if (ret != -EIOCBQUEUED)
 		sio_write_complete(&sio->iocb, ret);
@@ -489,7 +489,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 	int p;
 
 	if (ret == sio->len) {
-		for (p = 0; p < sio->pages; p++) {
+		for (p = 0; p < sio->bvecs; p++) {
 			struct folio *folio = page_folio(sio->bvec[p].bv_page);
 
 			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
@@ -499,7 +499,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
 		}
 		count_vm_events(PSWPIN, sio->len >> PAGE_SHIFT);
 	} else {
-		for (p = 0; p < sio->pages; p++) {
+		for (p = 0; p < sio->bvecs; p++) {
 			struct folio *folio = page_folio(sio->bvec[p].bv_page);
 
 			folio_unlock(folio);
@@ -559,13 +559,13 @@ static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
 		init_sync_kiocb(&sio->iocb, sis->swap_file);
 		sio->iocb.ki_pos = pos;
 		sio->iocb.ki_complete = sio_read_complete;
-		sio->pages = 0;
+		sio->bvecs = 0;
 		sio->len = 0;
 	}
-	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
+	bvec_set_folio(&sio->bvec[sio->bvecs], folio, folio_size(folio), 0);
 	sio->len += folio_size(folio);
-	sio->pages += 1;
-	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
+	sio->bvecs += 1;
+	if (sio->bvecs == ARRAY_SIZE(sio->bvec) || !plug) {
 		swap_read_unplug(sio);
 		sio = NULL;
 	}
@@ -666,7 +666,7 @@ void __swap_read_unplug(struct swap_iocb *sio)
 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
 	int ret;
 
-	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
+	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->bvecs, sio->len);
 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
 	if (ret != -EIOCBQUEUED)
 		sio_read_complete(&sio->iocb, ret);
-- 
2.53.0



^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2026-04-02  6:17 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-02  6:12 [PATCH v4] mm/page_io: rename swap_iocb->pages to swap_iocb->bvecs David Carlier
2026-04-02  6:17 ` David CARLIER

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox