public inbox for linux-btrfs@vger.kernel.org
Subject: [PATCH] btrfs: Convert add_ra_bio_pages() to use a folio
From: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: 2024-01-26  6:56 UTC
To: Qu Wenruo
Cc: Matthew Wilcox (Oracle), David Sterba, linux-btrfs

Allocate order-0 folios instead of pages.  Saves twelve hidden calls
to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
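Note for reviewers: the compound_head() saving comes from the legacy
page wrappers themselves.  A minimal sketch of the pattern, simplified
from include/linux/mm.h (the real put_page() also has a devmap check):

	static inline void put_page(struct page *page)
	{
		/* page_folio() is the hidden compound_head() lookup */
		folio_put(page_folio(page));
	}

unlock_page(), PageWorkingset() and friends repeat the same lookup, so
calling folio_put()/folio_unlock()/folio_test_workingset() on a folio
we already hold skips it entirely.
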
 fs/btrfs/compression.c | 60 ++++++++++++++++++++----------------------
 1 file changed, 29 insertions(+), 31 deletions(-)
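
The failure check below reads !bio_add_folio() because, unlike
bio_add_page() (which returns the number of bytes added),
bio_add_folio() reports success as a bool:

	bool bio_add_folio(struct bio *bio, struct folio *folio,
			   size_t len, size_t off);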

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 68345f73d429..517f9bc58749 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -421,7 +421,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
 	u64 isize = i_size_read(inode);
 	int ret;
-	struct page *page;
 	struct extent_map *em;
 	struct address_space *mapping = inode->i_mapping;
 	struct extent_map_tree *em_tree;
@@ -447,6 +446,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
 	while (cur < compressed_end) {
+		struct folio *folio;
 		u64 page_end;
 		u64 pg_index = cur >> PAGE_SHIFT;
 		u32 add_size;
@@ -454,8 +454,12 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (pg_index > end_index)
 			break;
 
-		page = xa_load(&mapping->i_pages, pg_index);
-		if (page && !xa_is_value(page)) {
+		folio = xa_load(&mapping->i_pages, pg_index);
+		if (folio && !xa_is_value(folio)) {
+			/*
+			 * We don't have a reference count on the folio,
+			 * so it is unsafe to refer to folio_size()
+			 */
 			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
 					  fs_info->sectorsize_bits;
 
@@ -471,38 +475,38 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			continue;
 		}
 
-		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
-								 ~__GFP_FS));
-		if (!page)
+		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
+				~__GFP_FS), 0);
+		if (!folio)
 			break;
 
-		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
-			put_page(page);
+		if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
+			folio_put(folio);
 			/* There is already a page, skip to page end */
 			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
 			continue;
 		}
 
-		if (!*memstall && PageWorkingset(page)) {
+		if (!*memstall && folio_test_workingset(folio)) {
 			psi_memstall_enter(pflags);
 			*memstall = 1;
 		}
 
-		ret = set_page_extent_mapped(page);
+		ret = set_folio_extent_mapped(folio);
 		if (ret < 0) {
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			break;
 		}
 
-		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
+		page_end = folio_pos(folio) + folio_size(folio) - 1;
 		lock_extent(tree, cur, page_end, NULL);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
 		read_unlock(&em_tree->lock);
 
 		/*
-		 * At this point, we have a locked page in the page cache for
+		 * At this point, we have a locked folio in the page cache for
 		 * these bytes in the file.  But, we have to make sure they map
 		 * to this compressed extent on disk.
 		 */
@@ -511,28 +515,22 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, cur, page_end, NULL);
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			break;
 		}
 		free_extent_map(em);
 
-		if (page->index == end_index) {
-			size_t zero_offset = offset_in_page(isize);
-
-			if (zero_offset) {
-				int zeros;
-				zeros = PAGE_SIZE - zero_offset;
-				memzero_page(page, zero_offset, zeros);
-			}
-		}
+		if (folio->index == end_index && offset_in_page(isize))
+			folio_zero_segment(folio, offset_in_page(isize),
+					folio_size(folio));
 
 		add_size = min(em->start + em->len, page_end + 1) - cur;
-		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
-		if (ret != add_size) {
+		if (!bio_add_folio(orig_bio, folio, add_size,
+				   offset_in_folio(folio, cur))) {
 			unlock_extent(tree, cur, page_end, NULL);
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			break;
 		}
 		/*
@@ -541,9 +539,9 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * subpage::readers and to unlock the page.
 		 */
 		if (fs_info->sectorsize < PAGE_SIZE)
-			btrfs_subpage_start_reader(fs_info, page_folio(page),
+			btrfs_subpage_start_reader(fs_info, folio,
 						   cur, add_size);
-		put_page(page);
+		folio_put(folio);
 		cur += add_size;
 	}
 	return 0;
-- 
2.43.0

