From: David Sterba <dsterba@suse.cz>
To: Qu Wenruo <wqu@suse.com>
Cc: linux-btrfs@vger.kernel.org
Subject: Re: [PATCH] btrfs: migrate extent_buffer::pages[] to folio
Date: Fri, 1 Dec 2023 00:22:01 +0100 [thread overview]
Message-ID: <20231130232201.GX18929@twin.jikos.cz> (raw)
In-Reply-To: <b87c95b697347980b008d8140ceec49590af4f5d.1701037103.git.wqu@suse.com>
On Mon, Nov 27, 2023 at 08:48:45AM +1030, Qu Wenruo wrote:
> For now extent_buffer::pages[] are still only accept single page
> pointer, thus we can migrate to folios pretty easily.
>
> As for single page, page and folio are 1:1 mapped.
>
> This patch would just do the conversion from struct page to struct
> folio, providing the first step to higher order folio in the future.
>
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
> Some existing infrastructure like get_eb_page_index() can be used to
> help the conversion to higher order folio.
>
> Initially I tried to keep using page pointers for compound pages,
> but since they are using the same page pointer, it's pretty easy to get
> confused on whether the page is single or compound, and lead to various
> VM BUGs.
>
> Migrating to folio would address the problem much easier.
>
> Unfortunately this patch is just migrating extent_buffer::pages[] type
> to folio, we still have a lot of things worthy cleanup, but it should
> provide the last preparation needed for higher order folio integration.
> ---
> fs/btrfs/accessors.c | 18 ++---
> fs/btrfs/accessors.h | 4 +-
> fs/btrfs/ctree.c | 2 +-
> fs/btrfs/disk-io.c | 18 ++---
> fs/btrfs/extent_io.c | 134 +++++++++++++++++--------------
> fs/btrfs/extent_io.h | 2 +-
> fs/btrfs/tests/extent-io-tests.c | 6 +-
> 7 files changed, 98 insertions(+), 86 deletions(-)
>
> diff --git a/fs/btrfs/accessors.c b/fs/btrfs/accessors.c
> index 206cf1612c1d..ecc204728475 100644
> --- a/fs/btrfs/accessors.c
> +++ b/fs/btrfs/accessors.c
> @@ -27,7 +27,7 @@ static bool check_setget_bounds(const struct extent_buffer *eb,
> void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb)
> {
> token->eb = eb;
> - token->kaddr = page_address(eb->pages[0]);
> + token->kaddr = folio_address(eb->folios[0]);
> token->offset = 0;
> }
>
> @@ -74,13 +74,13 @@ u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
> member_offset + size <= token->offset + PAGE_SIZE) { \
> return get_unaligned_le##bits(token->kaddr + oip); \
> } \
> - token->kaddr = page_address(token->eb->pages[idx]); \
> + token->kaddr = folio_address(token->eb->folios[idx]); \
> token->offset = idx << PAGE_SHIFT; \
> if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE ) \
> return get_unaligned_le##bits(token->kaddr + oip); \
> \
> memcpy(lebytes, token->kaddr + oip, part); \
> - token->kaddr = page_address(token->eb->pages[idx + 1]); \
> + token->kaddr = folio_address(token->eb->folios[idx + 1]); \
> token->offset = (idx + 1) << PAGE_SHIFT; \
> memcpy(lebytes + part, token->kaddr, size - part); \
> return get_unaligned_le##bits(lebytes); \
> @@ -91,7 +91,7 @@ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
> const unsigned long member_offset = (unsigned long)ptr + off; \
> const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
> const unsigned long idx = get_eb_page_index(member_offset); \
> - char *kaddr = page_address(eb->pages[idx]); \
> + char *kaddr = folio_address(eb->folios[idx]); \
> const int size = sizeof(u##bits); \
> const int part = PAGE_SIZE - oip; \
> u8 lebytes[sizeof(u##bits)]; \
> @@ -101,7 +101,7 @@ u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
> return get_unaligned_le##bits(kaddr + oip); \
> \
> memcpy(lebytes, kaddr + oip, part); \
> - kaddr = page_address(eb->pages[idx + 1]); \
> + kaddr = folio_address(eb->folios[idx + 1]); \
> memcpy(lebytes + part, kaddr, size - part); \
> return get_unaligned_le##bits(lebytes); \
> } \
> @@ -125,7 +125,7 @@ void btrfs_set_token_##bits(struct btrfs_map_token *token, \
> put_unaligned_le##bits(val, token->kaddr + oip); \
> return; \
> } \
> - token->kaddr = page_address(token->eb->pages[idx]); \
> + token->kaddr = folio_address(token->eb->folios[idx]); \
> token->offset = idx << PAGE_SHIFT; \
> if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
> put_unaligned_le##bits(val, token->kaddr + oip); \
> @@ -133,7 +133,7 @@ void btrfs_set_token_##bits(struct btrfs_map_token *token, \
> } \
> put_unaligned_le##bits(val, lebytes); \
> memcpy(token->kaddr + oip, lebytes, part); \
> - token->kaddr = page_address(token->eb->pages[idx + 1]); \
> + token->kaddr = folio_address(token->eb->folios[idx + 1]); \
> token->offset = (idx + 1) << PAGE_SHIFT; \
> memcpy(token->kaddr, lebytes + part, size - part); \
> } \
> @@ -143,7 +143,7 @@ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
> const unsigned long member_offset = (unsigned long)ptr + off; \
> const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
> const unsigned long idx = get_eb_page_index(member_offset); \
> - char *kaddr = page_address(eb->pages[idx]); \
> + char *kaddr = folio_address(eb->folios[idx]); \
> const int size = sizeof(u##bits); \
> const int part = PAGE_SIZE - oip; \
> u8 lebytes[sizeof(u##bits)]; \
> @@ -156,7 +156,7 @@ void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
> \
> put_unaligned_le##bits(val, lebytes); \
> memcpy(kaddr + oip, lebytes, part); \
> - kaddr = page_address(eb->pages[idx + 1]); \
> + kaddr = folio_address(eb->folios[idx + 1]); \
> memcpy(kaddr, lebytes + part, size - part); \
> }
>
> diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
> index aa0844535644..ed7aa32972ad 100644
> --- a/fs/btrfs/accessors.h
> +++ b/fs/btrfs/accessors.h
> @@ -90,14 +90,14 @@ static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
> #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
> static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
> { \
> - const type *p = page_address(eb->pages[0]) + \
> + const type *p = folio_address(eb->folios[0]) + \
> offset_in_page(eb->start); \
> return get_unaligned_le##bits(&p->member); \
> } \
> static inline void btrfs_set_##name(const struct extent_buffer *eb, \
> u##bits val) \
> { \
> - type *p = page_address(eb->pages[0]) + offset_in_page(eb->start); \
> + type *p = folio_address(eb->folios[0]) + offset_in_page(eb->start); \
> put_unaligned_le##bits(val, &p->member); \
> }
>
> diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
> index 137c4eb24c28..e6c535cf3749 100644
> --- a/fs/btrfs/ctree.c
> +++ b/fs/btrfs/ctree.c
> @@ -832,7 +832,7 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
>
> if (oip + key_size <= PAGE_SIZE) {
> const unsigned long idx = get_eb_page_index(offset);
> - char *kaddr = page_address(eb->pages[idx]);
> + char *kaddr = folio_address(eb->folios[idx]);
>
> oip = get_eb_offset_in_page(eb, offset);
> tmp = (struct btrfs_disk_key *)(kaddr + oip);
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index 7fc78171a262..df9d860efc20 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -90,14 +90,13 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
> return;
> }
>
> - kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
> + kaddr = folio_address(buf->folios[0]) + offset_in_page(buf->start);
> crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
> first_page_part - BTRFS_CSUM_SIZE);
>
> - for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
> - kaddr = page_address(buf->pages[i]);
> - crypto_shash_update(shash, kaddr, PAGE_SIZE);
> - }
> + for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++)
> + crypto_shash_update(shash, folio_address(buf->folios[i]),
> + PAGE_SIZE);
> crypto_shash_final(shash, result);
> }
>
> @@ -180,7 +179,7 @@ static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
> return -EROFS;
>
> for (i = 0; i < num_pages; i++) {
> - struct page *p = eb->pages[i];
> + struct page *p = folio_page(eb->folios[i], 0);
> u64 start = max_t(u64, eb->start, page_offset(p));
> u64 end = min_t(u64, eb->start + eb->len, page_offset(p) + PAGE_SIZE);
> u32 len = end - start;
> @@ -268,8 +267,9 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
>
> if (WARN_ON_ONCE(found_start != eb->start))
> return BLK_STS_IOERR;
> - if (WARN_ON(!btrfs_page_test_uptodate(fs_info, eb->pages[0], eb->start,
> - eb->len)))
> + if (WARN_ON(!btrfs_page_test_uptodate(fs_info,
> + folio_page(eb->folios[0], 0), eb->start,
> + eb->len)))
> return BLK_STS_IOERR;
>
> ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
> @@ -378,7 +378,7 @@ int btrfs_validate_extent_buffer(struct extent_buffer *eb,
> }
>
> csum_tree_block(eb, result);
> - header_csum = page_address(eb->pages[0]) +
> + header_csum = folio_address(eb->folios[0]) +
> get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));
>
> if (memcmp(result, header_csum, csum_size) != 0) {
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index fcd7b4674d08..96e15cbdc660 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -715,6 +715,21 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
> return alloc_page_array(nr_pages, page_array, 0);
> }
>
> +static int alloc_eb_folio_array(unsigned int nr_pages,
> + struct folio **folio_array,
> + gfp_t extra_gfp)
> +{
> + struct page *page_array[INLINE_EXTENT_BUFFER_PAGES];
> + int ret;
> +
> + ret = alloc_page_array(nr_pages, page_array, extra_gfp);
alloc_page_array() got removed in v4 of "btrfs: refactor
alloc_extent_buffer() to allocate-then-attach method", please refresh the
patch, thanks.
> + if (ret < 0)
> + return ret;
> + for (int i = 0; i < nr_pages; i++)
> + folio_array[i] = page_folio(page_array[i]);
> + return 0;
> +}
prev parent reply other threads:[~2023-11-30 23:29 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-11-26 22:18 [PATCH] btrfs: migrate extent_buffer::pages[] to folio Qu Wenruo
2023-11-27 16:32 ` Josef Bacik
2023-11-27 22:17 ` Qu Wenruo
2023-11-29 16:02 ` David Sterba
2023-11-30 6:56 ` Qu Wenruo
2023-11-30 12:19 ` Neal Gompa
2023-11-30 21:04 ` Qu Wenruo
2023-11-30 23:18 ` David Sterba
2023-11-30 23:33 ` Qu Wenruo
2023-11-30 23:22 ` David Sterba [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231130232201.GX18929@twin.jikos.cz \
--to=dsterba@suse.cz \
--cc=linux-btrfs@vger.kernel.org \
--cc=wqu@suse.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox