From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
Christoph Hellwig <hch@infradead.org>,
linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 2/2] mpage: Convert __mpage_writepage() to use a folio more fully
Date: Thu, 26 Jan 2023 20:12:55 +0000
Message-ID: <20230126201255.1681189-3-willy@infradead.org>
In-Reply-To: <20230126201255.1681189-1-willy@infradead.org>

This is just a conversion to the folio API. While there are some nods
towards supporting multi-page folios in here, the blocks array is still
sized for one page's worth of blocks, and other single-page assumptions
remain, such as the blocks_per_page variable.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/mpage.c | 45 +++++++++++++++++++++------------------------
1 file changed, 21 insertions(+), 24 deletions(-)
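
[Editorial note, not part of the patch: a purely illustrative userspace
sketch of the single-page sizing mentioned in the commit message.  The
numbers below (4KiB page, 512-byte blocks, an order-2 folio) are assumed
values chosen for the example; they show why the fixed blocks[] array and
the blocks_per_page variable still cap __mpage_writepage() at one page's
worth of blocks.]

	#include <stdio.h>

	int main(void)
	{
		const unsigned page_size = 4096;	/* assumed PAGE_SIZE */
		const unsigned blkbits = 9;		/* assumed 512-byte blocks */

		/* What mpage.c sizes for today: one page's worth of blocks. */
		const unsigned blocks_per_page = page_size >> blkbits;

		/* A hypothetical order-2 (4-page) folio needs 4x as many. */
		const unsigned folio_bytes = 4 * page_size;
		const unsigned blocks_per_folio = folio_bytes >> blkbits;

		printf("blocks_per_page  = %u\n", blocks_per_page);	/* 8 */
		printf("blocks_per_folio = %u\n", blocks_per_folio);	/* 32 */
		return 0;
	}
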
diff --git a/fs/mpage.c b/fs/mpage.c
index 840f57ed2542..2efa393f0db7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -448,13 +448,11 @@ void clean_page_buffers(struct page *page)
static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
void *data)
{
- struct page *page = &folio->page;
struct mpage_data *mpd = data;
struct bio *bio = mpd->bio;
- struct address_space *mapping = page->mapping;
- struct inode *inode = page->mapping->host;
+ struct address_space *mapping = folio->mapping;
+ struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
- unsigned long end_index;
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
sector_t last_block;
sector_t block_in_file;
@@ -465,13 +463,13 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
int boundary = 0;
sector_t boundary_block = 0;
struct block_device *boundary_bdev = NULL;
- int length;
+ size_t length;
struct buffer_head map_bh;
loff_t i_size = i_size_read(inode);
int ret = 0;
+ struct buffer_head *head = folio_buffers(folio);
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
+ if (head) {
struct buffer_head *bh = head;
/* If they're all mapped and dirty, do it */
@@ -523,8 +521,8 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
/*
* The page has no buffers: map it to disk
*/
- BUG_ON(!PageUptodate(page));
- block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+ BUG_ON(!folio_test_uptodate(folio));
+ block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
/*
* Whole page beyond EOF? Skip allocating blocks to avoid leaking
* space.
@@ -532,7 +530,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
goto page_is_mapped;
last_block = (i_size - 1) >> blkbits;
- map_bh.b_page = page;
+ map_bh.b_folio = folio;
for (page_block = 0; page_block < blocks_per_page; ) {
map_bh.b_state = 0;
@@ -561,8 +559,8 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
first_unmapped = page_block;
page_is_mapped:
- end_index = i_size >> PAGE_SHIFT;
- if (page->index >= end_index) {
+ length = folio_size(folio);
+ if (folio_pos(folio) + length > i_size) {
/*
* The page straddles i_size. It must be zeroed out on each
* and every writepage invocation because it may be mmapped.
@@ -571,11 +569,10 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
* is zeroed when mapped, and writes to that region are not
* written out to the file."
*/
- unsigned offset = i_size & (PAGE_SIZE - 1);
-
- if (page->index > end_index || !offset)
+ length = i_size - folio_pos(folio);
+ if (WARN_ON_ONCE(folio_pos(folio) >= i_size))
goto confused;
- zero_user_segment(page, offset, PAGE_SIZE);
+ folio_zero_segment(folio, length, folio_size(folio));
}
/*
@@ -588,7 +585,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
if (bio == NULL) {
if (first_unmapped == blocks_per_page) {
if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
- page, wbc))
+ &folio->page, wbc))
goto out;
}
bio = bio_alloc(bdev, BIO_MAX_VECS,
@@ -603,18 +600,18 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
* the confused fail path above (OOM) will be very confused when
* it finds all bh marked clean (i.e. it will not write anything)
*/
- wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
+ wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
length = first_unmapped << blkbits;
- if (bio_add_page(bio, page, length, 0) < length) {
+ if (!bio_add_folio(bio, folio, length, 0)) {
bio = mpage_bio_submit(bio);
goto alloc_new;
}
- clean_buffers(page, first_unmapped);
+ clean_buffers(&folio->page, first_unmapped);
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- unlock_page(page);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
+ folio_unlock(folio);
if (boundary || (first_unmapped != blocks_per_page)) {
bio = mpage_bio_submit(bio);
if (boundary_block) {
@@ -633,7 +630,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
/*
* The caller has a ref on the inode, so *mapping is stable
*/
- ret = block_write_full_page(page, mpd->get_block, wbc);
+ ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
mapping_set_error(mapping, ret);
out:
mpd->bio = bio;
--
2.35.1