Linux block layer
 help / color / mirror / Atom feed
From: "Darrick J. Wong" <djwong@kernel.org>
To: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>,
	Christian Brauner <brauner@kernel.org>,
	Carlos Maiolino <cem@kernel.org>, Qu Wenruo <wqu@suse.com>,
	Al Viro <viro@zeniv.linux.org.uk>,
	linux-block@vger.kernel.org, linux-xfs@vger.kernel.org,
	linux-fsdevel@vger.kernel.org
Subject: Re: [PATCH 08/14] iomap: split out the per-bio logic from iomap_dio_bio_iter
Date: Wed, 14 Jan 2026 14:53:07 -0800	[thread overview]
Message-ID: <20260114225307.GM15551@frogsfrogsfrogs> (raw)
In-Reply-To: <20260114074145.3396036-9-hch@lst.de>

On Wed, Jan 14, 2026 at 08:41:06AM +0100, Christoph Hellwig wrote:
> Factor out a separate helper that builds and submits a single bio.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks like a reasonably straightforward hoist,
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>

--D

> ---
>  fs/iomap/direct-io.c | 111 +++++++++++++++++++++++--------------------
>  1 file changed, 59 insertions(+), 52 deletions(-)
> 
> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index 1acdab7cf5f1..63374ba83b55 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
> @@ -301,6 +301,56 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
>  	return 0;
>  }
>  
> +static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
> +		struct iomap_dio *dio, loff_t pos, unsigned int alignment,
> +		blk_opf_t op)
> +{
> +	struct bio *bio;
> +	ssize_t ret;
> +
> +	bio = iomap_dio_alloc_bio(iter, dio,
> +			bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS),
> +			op);
> +	fscrypt_set_bio_crypt_ctx(bio, iter->inode,
> +			pos >> iter->inode->i_blkbits, GFP_KERNEL);
> +	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
> +	bio->bi_write_hint = iter->inode->i_write_hint;
> +	bio->bi_ioprio = dio->iocb->ki_ioprio;
> +	bio->bi_private = dio;
> +	bio->bi_end_io = iomap_dio_bio_end_io;
> +
> +	ret = bio_iov_iter_get_pages(bio, dio->submit.iter, alignment - 1);
> +	if (unlikely(ret))
> +		goto out_put_bio;
> +	ret = bio->bi_iter.bi_size;
> +
> +	/*
> +	 * An atomic write bio must cover the complete length.  If it doesn't,
> +	 * error out.
> +	 */
> +	if ((op & REQ_ATOMIC) && WARN_ON_ONCE(ret != iomap_length(iter))) {
> +		ret = -EINVAL;
> +		goto out_put_bio;
> +	}
> +
> +	if (dio->flags & IOMAP_DIO_WRITE)
> +		task_io_account_write(ret);
> +	else if (dio->flags & IOMAP_DIO_DIRTY)
> +		bio_set_pages_dirty(bio);
> +
> +	/*
> +	 * We can only poll for single bio I/Os.
> +	 */
> +	if (iov_iter_count(dio->submit.iter))
> +		dio->iocb->ki_flags &= ~IOCB_HIPRI;
> +	iomap_dio_submit_bio(iter, dio, bio, pos);
> +	return ret;
> +
> +out_put_bio:
> +	bio_put(bio);
> +	return ret;
> +}
> +
>  static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
>  {
>  	const struct iomap *iomap = &iter->iomap;
> @@ -309,12 +359,11 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
>  	const loff_t length = iomap_length(iter);
>  	loff_t pos = iter->pos;
>  	blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
> -	struct bio *bio;
>  	bool need_zeroout = false;
> -	int ret = 0;
>  	u64 copied = 0;
>  	size_t orig_count;
>  	unsigned int alignment;
> +	ssize_t ret = 0;
>  
>  	/*
>  	 * File systems that write out of place and always allocate new blocks
> @@ -440,68 +489,27 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
>  	}
>  
>  	do {
> -		size_t n;
> -
>  		/*
>  		 * If completions already occurred and reported errors, give up now and
>  		 * don't bother submitting more bios.
>  		 */
> -		if (unlikely(data_race(dio->error))) {
> -			ret = 0;
> +		if (unlikely(data_race(dio->error)))
>  			goto out;
> -		}
>  
> -		bio = iomap_dio_alloc_bio(iter, dio,
> -				bio_iov_vecs_to_alloc(dio->submit.iter,
> -						BIO_MAX_VECS), bio_opf);
> -		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
> -					  GFP_KERNEL);
> -		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
> -		bio->bi_write_hint = inode->i_write_hint;
> -		bio->bi_ioprio = dio->iocb->ki_ioprio;
> -		bio->bi_private = dio;
> -		bio->bi_end_io = iomap_dio_bio_end_io;
> -
> -		ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
> -					     alignment - 1);
> -		if (unlikely(ret)) {
> +		ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
> +		if (unlikely(ret < 0)) {
>  			/*
>  			 * We have to stop part way through an IO. We must fall
>  			 * through to the sub-block tail zeroing here, otherwise
>  			 * this short IO may expose stale data in the tail of
>  			 * the block we haven't written data to.
>  			 */
> -			bio_put(bio);
> -			goto zero_tail;
> -		}
> -
> -		n = bio->bi_iter.bi_size;
> -		if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
> -			/*
> -			 * An atomic write bio must cover the complete length,
> -			 * which it doesn't, so error. We may need to zero out
> -			 * the tail (complete FS block), similar to when
> -			 * bio_iov_iter_get_pages() returns an error, above.
> -			 */
> -			ret = -EINVAL;
> -			bio_put(bio);
> -			goto zero_tail;
> +			break;
>  		}
> -		if (dio->flags & IOMAP_DIO_WRITE)
> -			task_io_account_write(n);
> -		else if (dio->flags & IOMAP_DIO_DIRTY)
> -			bio_set_pages_dirty(bio);
> -
> -		dio->size += n;
> -		copied += n;
> -
> -		/*
> -		 * We can only poll for single bio I/Os.
> -		 */
> -		if (iov_iter_count(dio->submit.iter))
> -			dio->iocb->ki_flags &= ~IOCB_HIPRI;
> -		iomap_dio_submit_bio(iter, dio, bio, pos);
> -		pos += n;
> +		dio->size += ret;
> +		copied += ret;
> +		pos += ret;
> +		ret = 0;
>  	} while (iov_iter_count(dio->submit.iter));
>  
>  	/*
> @@ -510,7 +518,6 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
>  	 * the block tail in the latter case, we can expose stale data via mmap
>  	 * reads of the EOF block.
>  	 */
> -zero_tail:
>  	if (need_zeroout ||
>  	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
>  		/* zero out from the end of the write to the end of the block */
> -- 
> 2.47.3
> 
> 

  reply	other threads:[~2026-01-14 22:53 UTC|newest]

Thread overview: 37+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-01-14  7:40 bounce buffer direct I/O when stable pages are required Christoph Hellwig
2026-01-14  7:40 ` [PATCH 01/14] block: refactor get_contig_folio_len Christoph Hellwig
2026-01-14  7:41 ` [PATCH 02/14] block: open code bio_add_page and fix handling of mismatching P2P ranges Christoph Hellwig
2026-01-14 12:46   ` Johannes Thumshirn
2026-01-14 13:01     ` hch
2026-01-14  7:41 ` [PATCH 03/14] iov_iter: extract a iov_iter_extract_bvecs helper from bio code Christoph Hellwig
2026-01-14  7:41 ` [PATCH 04/14] block: remove bio_release_page Christoph Hellwig
2026-01-14  7:41 ` [PATCH 05/14] block: add helpers to bounce buffer an iov_iter into bios Christoph Hellwig
2026-01-14 12:51   ` Johannes Thumshirn
2026-01-14  7:41 ` [PATCH 06/14] iomap: fix submission side handling of completion side errors Christoph Hellwig
2026-01-14 22:35   ` Darrick J. Wong
2026-01-15  6:17     ` Christoph Hellwig
2026-01-14  7:41 ` [PATCH 07/14] iomap: simplify iomap_dio_bio_iter Christoph Hellwig
2026-01-14 22:51   ` Darrick J. Wong
2026-01-15  6:20     ` Christoph Hellwig
2026-01-14  7:41 ` [PATCH 08/14] iomap: split out the per-bio logic from iomap_dio_bio_iter Christoph Hellwig
2026-01-14 22:53   ` Darrick J. Wong [this message]
2026-01-14  7:41 ` [PATCH 09/14] iomap: share code between iomap_dio_bio_end_io and iomap_finish_ioend_direct Christoph Hellwig
2026-01-14 22:54   ` Darrick J. Wong
2026-01-14  7:41 ` [PATCH 10/14] iomap: free the bio before completing the dio Christoph Hellwig
2026-01-14 22:55   ` Darrick J. Wong
2026-01-15  6:21     ` Christoph Hellwig
2026-01-14  7:41 ` [PATCH 11/14] iomap: rename IOMAP_DIO_DIRTY to IOMAP_DIO_USER_BACKED Christoph Hellwig
2026-01-14 22:56   ` Darrick J. Wong
2026-01-14  7:41 ` [PATCH 12/14] iomap: support ioends for direct reads Christoph Hellwig
2026-01-14 22:57   ` Darrick J. Wong
2026-01-15  6:21     ` Christoph Hellwig
2026-01-14  7:41 ` [PATCH 13/14] iomap: add a flag to bounce buffer direct I/O Christoph Hellwig
2026-01-14 22:59   ` Darrick J. Wong
2026-01-15  6:21     ` Christoph Hellwig
2026-01-14  7:41 ` [PATCH 14/14] xfs: use bounce buffering direct I/O when the device requires stable pages Christoph Hellwig
2026-01-14 23:07   ` Darrick J. Wong
2026-01-15  6:24     ` Christoph Hellwig
2026-01-14  9:52 ` bounce buffer direct I/O when stable pages are required Qu Wenruo
2026-01-14 12:39   ` Christoph Hellwig
  -- strict thread matches above, loose matches on Subject: below --
2026-01-19  7:44 bounce buffer direct I/O when stable pages are required v2 Christoph Hellwig
2026-01-19  7:44 ` [PATCH 08/14] iomap: split out the per-bio logic from iomap_dio_bio_iter Christoph Hellwig
2026-01-23  8:57   ` Damien Le Moal

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260114225307.GM15551@frogsfrogsfrogs \
    --to=djwong@kernel.org \
    --cc=axboe@kernel.dk \
    --cc=brauner@kernel.org \
    --cc=cem@kernel.org \
    --cc=hch@lst.de \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    --cc=viro@zeniv.linux.org.uk \
    --cc=wqu@suse.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox