linux-fsdevel.vger.kernel.org archive mirror
From: Jaegeuk Kim <jaegeuk@kernel.org>
To: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Cc: chao@kernel.org, linux-kernel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net, linux-mm@kvack.org,
	fengnanchang@gmail.com, linux-fsdevel@vger.kernel.org
Subject: Re: [RFC PATCH] f2fs: Convert f2fs_write_cache_pages() to use filemap_get_folios_tag()
Date: Thu, 15 Dec 2022 11:02:24 -0800
Message-ID: <Y5tvQKT8HWxngEnc@google.com>
In-Reply-To: <20221212191317.9730-1-vishal.moola@gmail.com>

On 12/12, Vishal Moola (Oracle) wrote:
> Converted the function to use a folio_batch instead of a pagevec. This
> is in preparation for the removal of find_get_pages_range_tag().
> 
> Also modified f2fs_all_cluster_page_ready to take in a folio_batch
> instead of a pagevec. This does NOT support large folios. The function
> currently only operates on folios of size 1, so this shouldn't cause
> any issues right now.
> 
> This version of the patch limits the number of pages fetched to
> F2FS_ONSTACK_PAGES. If that limit is reached, the start index is
> updated manually, since filemap_get_folios_tag() advances the index
> past the last found folio, not necessarily past the last page used.
> 
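For context, the generic caller pattern that this series replaces
find_get_pages_range_tag() with looks roughly like the fragment below.
This is a simplified, illustrative sketch, not code from this patch:
the function name walk_dirty_folios is made up for the example, and the
usual <linux/pagemap.h>, <linux/pagevec.h> and <linux/sched.h>
declarations are assumed.

        /*
         * Illustrative sketch: fill a folio_batch with tagged folios,
         * walk it, release it, and repeat.  filemap_get_folios_tag()
         * advances the start index past the last folio it returned.
         */
        static void walk_dirty_folios(struct address_space *mapping,
                                      pgoff_t index, pgoff_t end)
        {
                struct folio_batch fbatch;
                unsigned int i;

                folio_batch_init(&fbatch);
                while (filemap_get_folios_tag(mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY, &fbatch)) {
                        for (i = 0; i < folio_batch_count(&fbatch); i++) {
                                struct folio *folio = fbatch.folios[i];

                                if (!folio_test_dirty(folio))
                                        continue;
                                /* per-folio writeback work goes here */
                        }
                        folio_batch_release(&fbatch);
                        cond_resched();
                }
        }
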
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---
> 
> Let me know if you prefer this version and I'll include it in v5
> of the patch series when I rebase it after the merge window.
> 
> ---
>  fs/f2fs/data.c | 86 ++++++++++++++++++++++++++++++++++----------------
>  1 file changed, 59 insertions(+), 27 deletions(-)
> 
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index a71e818cd67b..1703e353f0e0 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2939,6 +2939,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  	int ret = 0;
>  	int done = 0, retry = 0;
>  	struct page *pages[F2FS_ONSTACK_PAGES];
> +	struct folio_batch fbatch;
>  	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
>  	struct bio *bio = NULL;
>  	sector_t last_block;
> @@ -2959,6 +2960,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  		.private = NULL,
>  	};
>  #endif
> +	int nr_folios, p, idx;
>  	int nr_pages;
>  	pgoff_t index;
>  	pgoff_t end;		/* Inclusive */
> @@ -2969,6 +2971,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  	int submitted = 0;
>  	int i;
>  
> +	folio_batch_init(&fbatch);
> +
>  	if (get_dirty_pages(mapping->host) <=
>  				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
>  		set_inode_flag(mapping->host, FI_HOT_DATA);
> @@ -2994,13 +2998,38 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  		tag_pages_for_writeback(mapping, index, end);
>  	done_index = index;
>  	while (!done && !retry && (index <= end)) {
> -		nr_pages = find_get_pages_range_tag(mapping, &index, end,
> -				tag, F2FS_ONSTACK_PAGES, pages);
> -		if (nr_pages == 0)
> +		nr_pages = 0;
> +again:
> +		nr_folios = filemap_get_folios_tag(mapping, &index, end,
> +				tag, &fbatch);

Can't the folio code handle this internally, given F2FS_ONSTACK_PAGES and
the pages array?
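
If the question is whether the folio-to-page expansion and the
F2FS_ONSTACK_PAGES cap could be hidden behind the lookup rather than
open-coded in f2fs, one possible shape for such a helper is sketched
below. The name filemap_get_pages_tag and its signature are
hypothetical, made up for illustration; this is not an existing API.

        /*
         * Hypothetical helper (illustration only): fill a page array from
         * tagged folios, stopping once max_pages entries are used.  Each
         * page placed in the array takes an extra folio reference, and the
         * start index is advanced past the last page actually consumed,
         * mirroring what the open-coded loop in this patch does.
         */
        static unsigned int filemap_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, pgoff_t end, xa_mark_t tag,
                        unsigned int max_pages, struct page **pages)
        {
                struct folio_batch fbatch;
                unsigned int nr_pages = 0;

                folio_batch_init(&fbatch);

                while (nr_pages < max_pages &&
                       filemap_get_folios_tag(mapping, index, end, tag, &fbatch)) {
                        unsigned int i, idx;

                        for (i = 0; i < folio_batch_count(&fbatch); i++) {
                                struct folio *folio = fbatch.folios[i];

                                for (idx = 0; idx < folio_nr_pages(folio); idx++) {
                                        pages[nr_pages++] = folio_page(folio, idx);
                                        folio_ref_inc(folio);
                                        if (nr_pages == max_pages) {
                                                /* resume after the last page used */
                                                *index = folio->index + idx + 1;
                                                goto out;
                                        }
                                }
                        }
                        folio_batch_release(&fbatch);
                }
                return nr_pages;
        out:
                folio_batch_release(&fbatch);
                return nr_pages;
        }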

> +		if (nr_folios == 0) {
> +			if (nr_pages)
> +				goto write;
>  			break;
> +		}
>  
> +		for (i = 0; i < nr_folios; i++) {
> +			struct folio *folio = fbatch.folios[i];
> +
> +			idx = 0;
> +			p = folio_nr_pages(folio);
> +add_more:
> +			pages[nr_pages] = folio_page(folio, idx);
> +			folio_ref_inc(folio);
> +			if (++nr_pages == F2FS_ONSTACK_PAGES) {
> +				index = folio->index + idx + 1;
> +				folio_batch_release(&fbatch);
> +				goto write;
> +			}
> +			if (++idx < p)
> +				goto add_more;
> +		}
> +		folio_batch_release(&fbatch);
> +		goto again;
> +write:
>  		for (i = 0; i < nr_pages; i++) {
>  			struct page *page = pages[i];
> +			struct folio *folio = page_folio(page);
>  			bool need_readd;
>  readd:
>  			need_readd = false;
> @@ -3017,7 +3046,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  				}
>  
>  				if (!f2fs_cluster_can_merge_page(&cc,
> -								page->index)) {
> +								folio->index)) {
>  					ret = f2fs_write_multi_pages(&cc,
>  						&submitted, wbc, io_type);
>  					if (!ret)
> @@ -3026,27 +3055,28 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  				}
>  
>  				if (unlikely(f2fs_cp_error(sbi)))
> -					goto lock_page;
> +					goto lock_folio;
>  
>  				if (!f2fs_cluster_is_empty(&cc))
> -					goto lock_page;
> +					goto lock_folio;
>  
>  				if (f2fs_all_cluster_page_ready(&cc,
>  					pages, i, nr_pages, true))
> -					goto lock_page;
> +					goto lock_folio;
>  
>  				ret2 = f2fs_prepare_compress_overwrite(
>  							inode, &pagep,
> -							page->index, &fsdata);
> +							folio->index, &fsdata);
>  				if (ret2 < 0) {
>  					ret = ret2;
>  					done = 1;
>  					break;
>  				} else if (ret2 &&
>  					(!f2fs_compress_write_end(inode,
> -						fsdata, page->index, 1) ||
> +						fsdata, folio->index, 1) ||
>  					 !f2fs_all_cluster_page_ready(&cc,
> -						pages, i, nr_pages, false))) {
> +						pages, i, nr_pages,
> +						false))) {
>  					retry = 1;
>  					break;
>  				}
> @@ -3059,46 +3089,47 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  				break;
>  			}
>  #ifdef CONFIG_F2FS_FS_COMPRESSION
> -lock_page:
> +lock_folio:
>  #endif
> -			done_index = page->index;
> +			done_index = folio->index;
>  retry_write:
> -			lock_page(page);
> +			folio_lock(folio);
>  
> -			if (unlikely(page->mapping != mapping)) {
> +			if (unlikely(folio->mapping != mapping)) {
>  continue_unlock:
> -				unlock_page(page);
> +				folio_unlock(folio);
>  				continue;
>  			}
>  
> -			if (!PageDirty(page)) {
> +			if (!folio_test_dirty(folio)) {
>  				/* someone wrote it for us */
>  				goto continue_unlock;
>  			}
>  
> -			if (PageWriteback(page)) {
> +			if (folio_test_writeback(folio)) {
>  				if (wbc->sync_mode != WB_SYNC_NONE)
> -					f2fs_wait_on_page_writeback(page,
> +					f2fs_wait_on_page_writeback(
> +							&folio->page,
>  							DATA, true, true);
>  				else
>  					goto continue_unlock;
>  			}
>  
> -			if (!clear_page_dirty_for_io(page))
> +			if (!folio_clear_dirty_for_io(folio))
>  				goto continue_unlock;
>  
>  #ifdef CONFIG_F2FS_FS_COMPRESSION
>  			if (f2fs_compressed_file(inode)) {
> -				get_page(page);
> -				f2fs_compress_ctx_add_page(&cc, page);
> +				folio_get(folio);
> +				f2fs_compress_ctx_add_page(&cc, &folio->page);
>  				continue;
>  			}
>  #endif
> -			ret = f2fs_write_single_data_page(page, &submitted,
> -					&bio, &last_block, wbc, io_type,
> -					0, true);
> +			ret = f2fs_write_single_data_page(&folio->page,
> +					&submitted, &bio, &last_block,
> +					wbc, io_type, 0, true);
>  			if (ret == AOP_WRITEPAGE_ACTIVATE)
> -				unlock_page(page);
> +				folio_unlock(folio);
>  #ifdef CONFIG_F2FS_FS_COMPRESSION
>  result:
>  #endif
> @@ -3122,7 +3153,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  					}
>  					goto next;
>  				}
> -				done_index = page->index + 1;
> +				done_index = folio->index +
> +					folio_nr_pages(folio);
>  				done = 1;
>  				break;
>  			}
> @@ -3136,7 +3168,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
>  			if (need_readd)
>  				goto readd;
>  		}
> -		release_pages(pages, nr_pages);
> +		release_pages(pages, nr_pages);
>  		cond_resched();
>  	}
>  #ifdef CONFIG_F2FS_FS_COMPRESSION
> -- 
> 2.38.1

Thread overview: 61+ messages
2022-10-17 20:24 [PATCH v3 00/23] Convert to filemap_get_folios_tag() Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 01/23] pagemap: Add filemap_grab_folio() Vishal Moola (Oracle)
2022-10-24 19:36   ` Vishal Moola
2022-10-24 19:38   ` Matthew Wilcox
2022-10-17 20:24 ` [PATCH v3 02/23] filemap: Added filemap_get_folios_tag() Vishal Moola (Oracle)
2022-10-24 19:42   ` Matthew Wilcox
2022-10-17 20:24 ` [PATCH v3 03/23] filemap: Convert __filemap_fdatawait_range() to use filemap_get_folios_tag() Vishal Moola (Oracle)
2022-10-24 20:06   ` Matthew Wilcox
2022-10-17 20:24 ` [PATCH v3 04/23] page-writeback: Convert write_cache_pages() " Vishal Moola (Oracle)
2022-10-24 20:12   ` Matthew Wilcox
2022-10-17 20:24 ` [PATCH v3 05/23] afs: Convert afs_writepages_region() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 06/23] btrfs: Convert btree_write_cache_pages() to use filemap_get_folio_tag() Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 07/23] btrfs: Convert extent_write_cache_pages() to use filemap_get_folios_tag() Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 08/23] ceph: Convert ceph_writepages_start() " Vishal Moola (Oracle)
2022-10-28 17:20   ` Jeff Layton
2022-10-17 20:24 ` [PATCH v3 09/23] cifs: Convert wdata_alloc_and_fillpages() " Vishal Moola (Oracle)
2022-10-23 14:01   ` kernel test robot
2022-10-17 20:24 ` [PATCH v3 10/23] ext4: Convert mpage_prepare_extent_to_map() " Vishal Moola (Oracle)
2022-10-24 19:26   ` Vishal Moola
2022-10-17 20:24 ` [PATCH v3 11/23] f2fs: Convert f2fs_fsync_node_pages() " Vishal Moola (Oracle)
2022-10-24 19:31   ` Vishal Moola
2022-11-10 18:51     ` Vishal Moola
2022-10-29  4:46   ` [f2fs-dev] " Chao Yu
2022-10-17 20:24 ` [PATCH v3 12/23] f2fs: Convert f2fs_flush_inline_data() " Vishal Moola (Oracle)
2022-10-29  4:47   ` [f2fs-dev] " Chao Yu
2022-10-17 20:24 ` [PATCH v3 13/23] f2fs: Convert f2fs_sync_node_pages() " Vishal Moola (Oracle)
2022-10-29  4:47   ` [f2fs-dev] " Chao Yu
2022-10-17 20:24 ` [PATCH v3 14/23] f2fs: Convert f2fs_write_cache_pages() " Vishal Moola (Oracle)
2022-11-14  7:02   ` [f2fs-dev] " Chao Yu
2022-11-14 21:38     ` Vishal Moola
2022-11-23  2:26       ` Vishal Moola
2022-11-23  7:51         ` Vishal Moola
2022-12-05 20:34         ` Vishal Moola
2022-12-12 14:41           ` Chao Yu
2022-12-12 19:13             ` [RFC PATCH] " Vishal Moola (Oracle)
2022-12-15  1:48               ` Chao Yu
2022-12-15 18:45                 ` Matthew Wilcox
2022-12-21 17:17                   ` Vishal Moola
2022-12-23  8:07                     ` Christoph Hellwig
2022-12-15 19:02               ` Jaegeuk Kim [this message]
2023-01-03 20:53                 ` Matthew Wilcox
2022-11-29 19:14     ` [f2fs-dev] [PATCH v3 14/23] " Matthew Wilcox
2022-11-30 12:48       ` [PATCH] f2fs: Support enhanced hot/cold data separation for f2fs Yangtao Li
2022-11-30 15:18         ` Matthew Wilcox
2022-12-07 20:51           ` Luis Chamberlain
2024-01-25 20:47             ` Matthew Wilcox
2024-01-25 20:54               ` Luis Chamberlain
2024-01-26 21:01                 ` Matthew Wilcox
2024-01-26 21:32                   ` Luis Chamberlain
2024-01-27  7:05                     ` Eric Biggers
2022-11-30 12:51       ` [PATCH]f2fs: Convert f2fs_write_cache_pages() to use filemap_get_folios_tag() Yangtao Li
2022-10-17 20:24 ` [PATCH v3 15/23] f2fs: Convert last_fsync_dnode() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 16/23] f2fs: Convert f2fs_sync_meta_pages() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 17/23] gfs2: Convert gfs2_write_cache_jdata() " Vishal Moola (Oracle)
2022-10-24 19:23   ` Vishal Moola
2022-10-17 20:24 ` [PATCH v3 18/23] nilfs2: Convert nilfs_lookup_dirty_data_buffers() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 19/23] nilfs2: Convert nilfs_lookup_dirty_node_buffers() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 20/23] nilfs2: Convert nilfs_btree_lookup_dirty_buffers() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 21/23] nilfs2: Convert nilfs_copy_dirty_pages() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 22/23] nilfs2: Convert nilfs_clear_dirty_pages() " Vishal Moola (Oracle)
2022-10-17 20:24 ` [PATCH v3 23/23] filemap: Remove find_get_pages_range_tag() Vishal Moola (Oracle)
