From: Nikolay Borisov <nborisov@suse.com>
To: David Sterba <dsterba@suse.com>, linux-btrfs@vger.kernel.org
Subject: Re: [PATCH 2/3] btrfs: pass only eb to num_extent_pages
Date: Tue, 24 Apr 2018 16:26:27 +0300 [thread overview]
Message-ID: <c213af39-6161-5643-925b-bd6c36fe574e@suse.com> (raw)
In-Reply-To: <ec1c1d8fd32993aa5271f3f9b4160bd2df62ba03.1524523682.git.dsterba@suse.com>
On 24.04.2018 02:03, David Sterba wrote:
> Almost all callers pass the start and len as two arguments, but this is
> not necessary; all the information is provided by the eb. By reordering
> the calls to num_extent_pages, we don't need the local variables with
> start/len.
>
> Signed-off-by: David Sterba <dsterba@suse.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
> ---
> fs/btrfs/extent_io.c | 30 +++++++++++++++---------------
> fs/btrfs/extent_io.h | 4 ++--
> 2 files changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index fb32394fd830..0cc5d6ae1876 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -2062,7 +2062,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
> struct extent_buffer *eb, int mirror_num)
> {
> u64 start = eb->start;
> - unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
> + unsigned long i, num_pages = num_extent_pages(eb);
> int ret = 0;
>
> if (sb_rdonly(fs_info->sb))
> @@ -3591,7 +3591,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
> if (!ret)
> return ret;
>
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++) {
> struct page *p = eb->pages[i];
>
> @@ -3721,7 +3721,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
> int ret = 0;
>
> clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> atomic_set(&eb->io_pages, num_pages);
>
> /* set btree blocks beyond nritems with 0 to avoid stale content. */
> @@ -4654,7 +4654,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
>
> BUG_ON(extent_buffer_under_io(eb));
>
> - index = num_extent_pages(eb->start, eb->len);
> + index = num_extent_pages(eb);
> if (index == 0)
> return;
>
> @@ -4747,7 +4747,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
> unsigned long i;
> struct page *p;
> struct extent_buffer *new;
> - unsigned long num_pages = num_extent_pages(src->start, src->len);
> + unsigned long num_pages = num_extent_pages(src);
>
> new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
> if (new == NULL)
> @@ -4779,12 +4779,11 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
> unsigned long num_pages;
> unsigned long i;
>
> - num_pages = num_extent_pages(start, len);
> -
> eb = __alloc_extent_buffer(fs_info, start, len);
> if (!eb)
> return NULL;
>
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++) {
> eb->pages[i] = alloc_page(GFP_NOFS);
> if (!eb->pages[i])
> @@ -4848,7 +4847,7 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb,
>
> check_buffer_tree_ref(eb);
>
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++) {
> struct page *p = eb->pages[i];
>
> @@ -4945,7 +4944,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
> u64 start)
> {
> unsigned long len = fs_info->nodesize;
> - unsigned long num_pages = num_extent_pages(start, len);
> + unsigned long num_pages;
> unsigned long i;
> unsigned long index = start >> PAGE_SHIFT;
> struct extent_buffer *eb;
> @@ -4968,6 +4967,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
> if (!eb)
> return ERR_PTR(-ENOMEM);
>
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++, index++) {
> p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
> if (!p) {
> @@ -5164,7 +5164,7 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
> unsigned long num_pages;
> struct page *page;
>
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
>
> for (i = 0; i < num_pages; i++) {
> page = eb->pages[i];
> @@ -5198,7 +5198,7 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
>
> was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
>
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> WARN_ON(atomic_read(&eb->refs) == 0);
> WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
>
> @@ -5214,7 +5214,7 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
> unsigned long num_pages;
>
> clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++) {
> page = eb->pages[i];
> if (page)
> @@ -5229,7 +5229,7 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
> unsigned long num_pages;
>
> set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++) {
> page = eb->pages[i];
> SetPageUptodate(page);
> @@ -5253,7 +5253,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
> if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
> return 0;
>
> - num_pages = num_extent_pages(eb->start, eb->len);
> + num_pages = num_extent_pages(eb);
> for (i = 0; i < num_pages; i++) {
> page = eb->pages[i];
> if (wait == WAIT_NONE) {
> @@ -5581,7 +5581,7 @@ void copy_extent_buffer_full(struct extent_buffer *dst,
>
> ASSERT(dst->len == src->len);
>
> - num_pages = num_extent_pages(dst->start, dst->len);
> + num_pages = num_extent_pages(dst);
> for (i = 0; i < num_pages; i++)
> copy_page(page_address(dst->pages[i]),
> page_address(src->pages[i]));
> diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
> index ee92c1289edd..d08abc9d385e 100644
> --- a/fs/btrfs/extent_io.h
> +++ b/fs/btrfs/extent_io.h
> @@ -452,9 +452,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
> int mirror_num);
> void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
>
> -static inline unsigned long num_extent_pages(u64 start, u64 len)
> +static inline unsigned long num_extent_pages(const struct extent_buffer *eb)
> {
> - return len >> PAGE_SHIFT;
> + return eb->len >> PAGE_SHIFT;
> }
>
> static inline void extent_buffer_get(struct extent_buffer *eb)
>
next prev parent reply other threads:[~2018-04-24 13:26 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-04-23 23:03 [PATCH 0/3] Simplify counting of extent buffer pages David Sterba
2018-04-23 23:03 ` [PATCH 1/3] btrfs: simplify counting number of eb pages David Sterba
2018-04-24 5:59 ` Nikolay Borisov
2018-04-24 6:22 ` Qu Wenruo
2018-04-24 10:29 ` David Sterba
2018-04-24 10:36 ` Qu Wenruo
2018-04-23 23:03 ` [PATCH 2/3] btrfs: pass only eb to num_extent_pages David Sterba
2018-04-24 13:26 ` Nikolay Borisov [this message]
2018-04-23 23:03 ` [PATCH 3/3] btrfs: switch types to int when counting eb pages David Sterba
2018-04-24 13:26 ` Nikolay Borisov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=c213af39-6161-5643-925b-bd6c36fe574e@suse.com \
--to=nborisov@suse.com \
--cc=dsterba@suse.com \
--cc=linux-btrfs@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox