public inbox for linux-xfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Christoph Hellwig <hch@infradead.org>
To: Dave Chinner <david@fromorbit.com>
Cc: linux-xfs@vger.kernel.org
Subject: Re: [PATCH 9/9] xfs: rename bp->b_folio_count
Date: Tue, 19 Mar 2024 00:37:09 -0700	[thread overview]
Message-ID: <ZflApWnBkHDmo4HJ@infradead.org> (raw)
In-Reply-To: <20240318224715.3367463-10-david@fromorbit.com>

On Tue, Mar 19, 2024 at 09:46:00AM +1100, Dave Chinner wrote:
> From: Dave Chinner <dchinner@redhat.com>
> 
> The count is used purely to allocate the correct number of bvecs for
> submitting IO. Rename it to b_bvec_count.

Well, I think we should just kill it as it simply is the rounded
up length in PAGE_SIZE units.  The patch below passes a quick xfstests
run and is on top of this series:

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2a6796c48454f7..8ecf88b5504c18 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -67,27 +67,17 @@ static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
 }
 
 /*
- * Return true if the buffer is vmapped.
- *
- * b_addr is always set, so we have to look at bp->b_bvec_count to determine if
- * the buffer was vmalloc()d or not.
+ * See comment above xfs_buf_alloc_folios() about the constraints placed on
+ * allocating vmapped buffers.
  */
-static inline int
-xfs_buf_is_vmapped(
-	struct xfs_buf	*bp)
+static inline unsigned int xfs_buf_vmap_len(struct xfs_buf *bp)
 {
-	return bp->b_bvec_count > 1;
+	return roundup(BBTOB(bp->b_length), PAGE_SIZE);
 }
 
-/*
- * See comment above xfs_buf_alloc_folios() about the constraints placed on
- * allocating vmapped buffers.
- */
-static inline int
-xfs_buf_vmap_len(
-	struct xfs_buf	*bp)
+static inline unsigned int xfs_buf_nr_pages(struct xfs_buf *bp)
 {
-	return (bp->b_bvec_count * PAGE_SIZE);
+	return DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
 }
 
 /*
@@ -304,13 +294,15 @@ xfs_buf_free(
 		goto free;
 	}
 
-	if (!(bp->b_flags & _XBF_KMEM))
-		mm_account_reclaimed_pages(bp->b_bvec_count);
-
-	if (bp->b_flags & _XBF_FOLIOS)
-		__folio_put(kmem_to_folio(bp->b_addr));
-	else
+	if (bp->b_flags & _XBF_FOLIOS) {
+		/* XXX: should this pass xfs_buf_nr_pages()? */
+		mm_account_reclaimed_pages(1);
+		folio_put(kmem_to_folio(bp->b_addr));
+	} else {
+		if (!(bp->b_flags & _XBF_KMEM))
+			mm_account_reclaimed_pages(xfs_buf_nr_pages(bp));
 		kvfree(bp->b_addr);
+	}
 
 	bp->b_flags &= _XBF_KMEM | _XBF_FOLIOS;
 
@@ -341,7 +333,6 @@ xfs_buf_alloc_kmem(
 		bp->b_addr = NULL;
 		return -ENOMEM;
 	}
-	bp->b_bvec_count = 1;
 	bp->b_flags |= _XBF_KMEM;
 	return 0;
 }
@@ -369,7 +360,6 @@ xfs_buf_alloc_folio(
 		return false;
 
 	bp->b_addr = folio_address(folio);
-	bp->b_bvec_count = 1;
 	bp->b_flags |= _XBF_FOLIOS;
 	return true;
 }
@@ -441,7 +431,6 @@ xfs_buf_alloc_folios(
 			count);
 		return -ENOMEM;
 	}
-	bp->b_bvec_count = count;
 
 	return 0;
 }
@@ -1470,7 +1459,9 @@ xfs_buf_bio_end_io(
 		cmpxchg(&bp->b_io_error, 0, error);
 	}
 
-	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+	if (!bp->b_error &&
+	    (bp->b_flags & XBF_READ) &&
+	    is_vmalloc_addr(bp->b_addr))
 		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
 	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
@@ -1485,6 +1476,7 @@ xfs_buf_ioapply_map(
 	unsigned int	*buf_offset,
 	blk_opf_t	op)
 {
+	unsigned int	nr_vecs = 1;
 	struct bio	*bio;
 	int		size;
 
@@ -1494,7 +1486,9 @@ xfs_buf_ioapply_map(
 
 	atomic_inc(&bp->b_io_remaining);
 
-	bio = bio_alloc(bp->b_target->bt_bdev, bp->b_bvec_count, op, GFP_NOIO);
+	if (is_vmalloc_addr(bp->b_addr))
+		nr_vecs = xfs_buf_nr_pages(bp);
+	bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, op, GFP_NOIO);
 	bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
@@ -1511,7 +1505,7 @@ xfs_buf_ioapply_map(
 		*buf_offset += len;
 	} while (size);
 
-	if (xfs_buf_is_vmapped(bp))
+	if (is_vmalloc_addr(bp->b_addr))
 		flush_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 	submit_bio(bio);
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 32688525890bec..ad92d11f4ae173 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -195,7 +195,6 @@ struct xfs_buf {
 	int			b_map_count;
 	atomic_t		b_pin_count;	/* pin count */
 	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
-	unsigned int		b_bvec_count;	/* bvecs needed for IO */
 	int			b_error;	/* error code on I/O */
 
 	/*
diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c
index 30d53ddd6e6980..f082b1a64fc950 100644
--- a/fs/xfs/xfs_buf_mem.c
+++ b/fs/xfs/xfs_buf_mem.c
@@ -169,7 +169,6 @@ xmbuf_map_folio(
 	unlock_page(page);
 
 	bp->b_addr = page_address(page);
-	bp->b_bvec_count = 1;
 	return 0;
 }
 
@@ -182,7 +181,6 @@ xmbuf_unmap_folio(
 
 	folio_put(kmem_to_folio(bp->b_addr));
 	bp->b_addr = NULL;
-	bp->b_bvec_count = 0;
 }
 
 /* Is this a valid daddr within the buftarg? */

  reply	other threads:[~2024-03-19  7:37 UTC|newest]

Thread overview: 47+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-03-18 22:45 [PATCH v2 0/9] xfs: use large folios for buffers Dave Chinner
2024-03-18 22:45 ` [PATCH 1/9] xfs: unmapped buffer item size straddling mismatch Dave Chinner
2024-03-18 22:45 ` [PATCH 2/9] xfs: use folios in the buffer cache Dave Chinner
2024-03-19  6:38   ` Christoph Hellwig
2024-03-19  6:52     ` Dave Chinner
2024-03-19  6:53   ` Christoph Hellwig
2024-03-19 21:42     ` Dave Chinner
2024-03-19 21:42     ` Dave Chinner
2024-03-19 17:15   ` Darrick J. Wong
2024-03-18 22:45 ` [PATCH 3/9] xfs: convert buffer cache to use high order folios Dave Chinner
2024-03-19  6:55   ` Christoph Hellwig
2024-03-19 17:29   ` Darrick J. Wong
2024-03-19 21:32     ` Christoph Hellwig
2024-03-19 21:38       ` Darrick J. Wong
2024-03-19 21:41         ` Christoph Hellwig
2024-03-19 22:23           ` Dave Chinner
2024-03-21  2:12           ` Darrick J. Wong
2024-03-21  2:40             ` Darrick J. Wong
2024-03-21 21:28               ` Christoph Hellwig
2024-03-21 21:39                 ` Darrick J. Wong
2024-03-21 22:02                   ` Christoph Hellwig
2024-03-19 21:55     ` Dave Chinner
2024-03-22  8:02   ` Pankaj Raghav (Samsung)
2024-03-22 22:04     ` Dave Chinner
2024-03-25 11:17       ` Pankaj Raghav (Samsung)
2024-03-18 22:45 ` [PATCH 4/9] xfs: kill XBF_UNMAPPED Dave Chinner
2024-03-19 17:30   ` Darrick J. Wong
2024-03-19 23:36     ` Dave Chinner
2024-03-18 22:45 ` [PATCH 5/9] xfs: buffer items don't straddle pages anymore Dave Chinner
2024-03-19  6:56   ` Christoph Hellwig
2024-03-19 17:31   ` Darrick J. Wong
2024-03-18 22:45 ` [PATCH 6/9] xfs: map buffers in xfs_buf_alloc_folios Dave Chinner
2024-03-19 17:34   ` Darrick J. Wong
2024-03-19 21:32     ` Christoph Hellwig
2024-03-19 21:39       ` Darrick J. Wong
2024-03-19 21:41         ` Christoph Hellwig
2024-03-18 22:45 ` [PATCH 7/9] xfs: walk b_addr for buffer I/O Dave Chinner
2024-03-19 17:42   ` Darrick J. Wong
2024-03-19 21:33     ` Christoph Hellwig
2024-03-18 22:45 ` [PATCH 8/9] xfs: use vmalloc for multi-folio buffers Dave Chinner
2024-03-19 17:48   ` Darrick J. Wong
2024-03-20  0:20     ` Dave Chinner
2024-03-18 22:46 ` [PATCH 9/9] xfs: rename bp->b_folio_count Dave Chinner
2024-03-19  7:37   ` Christoph Hellwig [this message]
2024-03-19 23:59     ` Dave Chinner
2024-03-19  0:24 ` [PATCH v2 0/9] xfs: use large folios for buffers Christoph Hellwig
2024-03-19  0:44   ` Dave Chinner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZflApWnBkHDmo4HJ@infradead.org \
    --to=hch@infradead.org \
    --cc=david@fromorbit.com \
    --cc=linux-xfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox