gfs2 filesystem and dlm development
 help / color / mirror / Atom feed
From: Gao Xiang <hsiangkao@linux.alibaba.com>
To: Joanne Koong <joannelkoong@gmail.com>,
	brauner@kernel.org, miklos@szeredi.hu
Cc: hch@infradead.org, djwong@kernel.org,
	linux-block@vger.kernel.org, gfs2@lists.linux.dev,
	linux-fsdevel@vger.kernel.org, kernel-team@meta.com,
	linux-xfs@vger.kernel.org, linux-doc@vger.kernel.org
Subject: Re: [PATCH v2 13/16] iomap: move read/readahead logic out of CONFIG_BLOCK guard
Date: Tue, 9 Sep 2025 10:14:46 +0800	[thread overview]
Message-ID: <a1529c0f-1f1a-477a-aeeb-a4f108aab26b@linux.alibaba.com> (raw)
In-Reply-To: <20250908185122.3199171-14-joannelkoong@gmail.com>



On 2025/9/9 02:51, Joanne Koong wrote:
> There is no longer a dependency on CONFIG_BLOCK in the iomap read and
> readahead logic. Move this logic out of the CONFIG_BLOCK guard. This
> allows non-block-based filesystems to use iomap for reads/readahead.
> 
> Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> ---
>   fs/iomap/buffered-io.c | 151 +++++++++++++++++++++--------------------
>   1 file changed, 76 insertions(+), 75 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index f673e03f4ffb..c424e8c157dd 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -358,81 +358,6 @@ void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
>   }
>   EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
>   
> -#ifdef CONFIG_BLOCK
> -static void iomap_read_end_io(struct bio *bio)
> -{
> -	int error = blk_status_to_errno(bio->bi_status);
> -	struct folio_iter fi;
> -
> -	bio_for_each_folio_all(fi, bio)
> -		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
> -	bio_put(bio);
> -}
> -
> -static int iomap_submit_read_bio(struct iomap_read_folio_ctx *ctx)
> -{
> -	struct bio *bio = ctx->private;
> -
> -	if (bio)
> -		submit_bio(bio);
> -
> -	return 0;
> -}
> -
> -/**
> - * Read in a folio range asynchronously through bios.
> - *
> - * This should only be used for read/readahead, not for buffered writes.
> - * Buffered writes must read in the folio synchronously.
> - */
> -static int iomap_read_folio_range_bio_async(const struct iomap_iter *iter,
> -		struct iomap_read_folio_ctx *ctx, loff_t pos, size_t plen)
> -{
> -	struct folio *folio = ctx->cur_folio;
> -	const struct iomap *iomap = &iter->iomap;
> -	size_t poff = offset_in_folio(folio, pos);
> -	loff_t length = iomap_length(iter);
> -	sector_t sector;
> -	struct bio *bio = ctx->private;
> -
> -	iomap_start_folio_read(folio, plen);
> -
> -	sector = iomap_sector(iomap, pos);
> -	if (!bio || bio_end_sector(bio) != sector ||
> -	    !bio_add_folio(bio, folio, plen, poff)) {
> -		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
> -		gfp_t orig_gfp = gfp;
> -		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
> -
> -		iomap_submit_read_bio(ctx);
> -
> -		if (ctx->rac) /* same as readahead_gfp_mask */
> -			gfp |= __GFP_NORETRY | __GFP_NOWARN;
> -		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
> -				     REQ_OP_READ, gfp);
> -		/*
> -		 * If the bio_alloc fails, try it again for a single page to
> -		 * avoid having to deal with partial page reads.  This emulates
> -		 * what do_mpage_read_folio does.
> -		 */
> -		if (!bio)
> -			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
> -		if (ctx->rac)
> -			bio->bi_opf |= REQ_RAHEAD;
> -		bio->bi_iter.bi_sector = sector;
> -		bio->bi_end_io = iomap_read_end_io;
> -		bio_add_folio_nofail(bio, folio, plen, poff);
> -		ctx->private = bio;
> -	}
> -	return 0;
> -}
> -
> -const struct iomap_read_ops iomap_read_bios_ops = {
> -	.read_folio_range = iomap_read_folio_range_bio_async,
> -	.read_submit = iomap_submit_read_bio,
> -};
> -EXPORT_SYMBOL_GPL(iomap_read_bios_ops);
> -
>   static int iomap_read_folio_iter(struct iomap_iter *iter,
>   		struct iomap_read_folio_ctx *ctx, bool *cur_folio_owned)
>   {
> @@ -601,6 +526,82 @@ void iomap_readahead(const struct iomap_ops *ops,
>   }
>   EXPORT_SYMBOL_GPL(iomap_readahead);
>   
> +#ifdef CONFIG_BLOCK
> +static void iomap_read_end_io(struct bio *bio)
> +{
> +	int error = blk_status_to_errno(bio->bi_status);
> +	struct folio_iter fi;
> +
> +	bio_for_each_folio_all(fi, bio)
> +		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
> +	bio_put(bio);
> +}
> +
> +static int iomap_submit_read_bio(struct iomap_read_folio_ctx *ctx)
> +{
> +	struct bio *bio = ctx->private;
> +
> +	if (bio)
> +		submit_bio(bio);
> +
> +	return 0;
> +}
> +
> +/**
> + * Read in a folio range asynchronously through bios.
> + *
> + * This should only be used for read/readahead, not for buffered writes.
> + * Buffered writes must read in the folio synchronously.
> + */
> +static int iomap_read_folio_range_bio_async(const struct iomap_iter *iter,
> +		struct iomap_read_folio_ctx *ctx, loff_t pos, size_t plen)
> +{
> +	struct folio *folio = ctx->cur_folio;
> +	const struct iomap *iomap = &iter->iomap;
> +	size_t poff = offset_in_folio(folio, pos);
> +	loff_t length = iomap_length(iter);
> +	sector_t sector;
> +	struct bio *bio = ctx->private;
> +
> +	iomap_start_folio_read(folio, plen);
> +
> +	sector = iomap_sector(iomap, pos);
> +	if (!bio || bio_end_sector(bio) != sector ||
> +	    !bio_add_folio(bio, folio, plen, poff)) {
> +		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
> +		gfp_t orig_gfp = gfp;
> +		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
> +
> +		if (bio)
> +			submit_bio(bio);
> +
> +		if (ctx->rac) /* same as readahead_gfp_mask */
> +			gfp |= __GFP_NORETRY | __GFP_NOWARN;
> +		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
> +				     REQ_OP_READ, gfp);
> +		/*
> +		 * If the bio_alloc fails, try it again for a single page to
> +		 * avoid having to deal with partial page reads.  This emulates
> +		 * what do_mpage_read_folio does.
> +		 */
> +		if (!bio)
> +			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
> +		if (ctx->rac)
> +			bio->bi_opf |= REQ_RAHEAD;
> +		bio->bi_iter.bi_sector = sector;
> +		bio->bi_end_io = iomap_read_end_io;
> +		bio_add_folio_nofail(bio, folio, plen, poff);
> +		ctx->private = bio;

Yes, I understand some way is needed to isolate bio from non-bio
based filesystems, and I also agree `bio` shouldn't be stashed
into `iter->private`, since that is an abuse of the field, as
mentioned in:
https://lore.kernel.org/r/20250903203031.GM1587915@frogsfrogsfrogs
https://lore.kernel.org/r/aLkskcgl3Z91oIVB@infradead.org

However, the naming of `(struct iomap_read_folio_ctx)->private`
really confuses me, because a field named `private` in
`read_folio_ctx` reads much like a filesystem-provided read
context, rather than something used internally by iomap to hold
the `bio` for block-based filesystems.

Also, the existence of `iter->private` makes the naming of
`ctx->private` even more confusing, at least in my view.

Thanks,
Gao Xiang

  reply	other threads:[~2025-09-09  2:14 UTC|newest]

Thread overview: 60+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-08 18:51 [PATCH v2 00/16] fuse: use iomap for buffered reads + readahead Joanne Koong
2025-09-08 18:51 ` [PATCH v2 01/16] iomap: move async bio read logic into helper function Joanne Koong
2025-09-11 11:09   ` Christoph Hellwig
2025-09-12 16:01     ` Joanne Koong
2025-09-08 18:51 ` [PATCH v2 02/16] iomap: move read/readahead bio submission " Joanne Koong
2025-09-11 11:09   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 03/16] iomap: rename cur_folio_in_bio to folio_owned Joanne Koong
2025-09-11 11:10   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 04/16] iomap: store read/readahead bio generically Joanne Koong
2025-09-11 11:11   ` Christoph Hellwig
2025-09-12 16:10     ` Joanne Koong
2025-09-08 18:51 ` [PATCH v2 05/16] iomap: propagate iomap_read_folio() error to caller Joanne Koong
2025-09-11 11:13   ` Christoph Hellwig
2025-09-12 16:28     ` Joanne Koong
2025-09-15 16:05       ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 06/16] iomap: iterate over entire folio in iomap_readpage_iter() Joanne Koong
2025-09-11 11:15   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 07/16] iomap: rename iomap_readpage_iter() to iomap_read_folio_iter() Joanne Koong
2025-09-11 11:16   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 08/16] iomap: rename iomap_readpage_ctx struct to iomap_read_folio_ctx Joanne Koong
2025-09-11 11:16   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 09/16] iomap: add public start/finish folio read helpers Joanne Koong
2025-09-11 11:16   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 10/16] iomap: make iomap_read_folio_ctx->folio_owned internal Joanne Koong
2025-09-11 11:17   ` Christoph Hellwig
2025-09-08 18:51 ` [PATCH v2 11/16] iomap: add caller-provided callbacks for read and readahead Joanne Koong
2025-09-09  0:14   ` Gao Xiang
2025-09-09  0:40     ` Gao Xiang
2025-09-09 15:24     ` Joanne Koong
2025-09-09 23:21       ` Gao Xiang
2025-09-10 17:41         ` Joanne Koong
2025-09-11 11:19           ` Christoph Hellwig
2025-09-11 11:26   ` Christoph Hellwig
2025-09-12 17:36     ` Joanne Koong
2025-09-08 18:51 ` [PATCH v2 12/16] iomap: add bias for async read requests Joanne Koong
2025-09-11 11:31   ` Christoph Hellwig
2025-09-12 17:30     ` Joanne Koong
2025-09-15 16:05       ` Christoph Hellwig
2025-09-16 19:14       ` Joanne Koong
2025-09-19 15:04         ` Christoph Hellwig
2025-09-19 17:58           ` Joanne Koong
2025-09-08 18:51 ` [PATCH v2 13/16] iomap: move read/readahead logic out of CONFIG_BLOCK guard Joanne Koong
2025-09-09  2:14   ` Gao Xiang [this message]
2025-09-09 15:33     ` Joanne Koong
2025-09-10  4:59       ` Gao Xiang
2025-09-11 11:37         ` Christoph Hellwig
2025-09-11 12:29           ` Gao Xiang
2025-09-11 19:45             ` Joanne Koong
2025-09-12  0:06               ` Gao Xiang
2025-09-12  1:09                 ` Gao Xiang
2025-09-12  1:10                   ` Gao Xiang
2025-09-12 19:56                     ` Joanne Koong
2025-09-12 20:09                       ` Joanne Koong
2025-09-12 23:35                         ` Gao Xiang
2025-09-12 23:20                       ` Gao Xiang
2025-09-11 11:44   ` Christoph Hellwig
2025-09-16 23:23     ` Joanne Koong
2025-09-08 18:51 ` [PATCH v2 14/16] fuse: use iomap for read_folio Joanne Koong
2025-09-08 18:51 ` [PATCH v2 15/16] fuse: use iomap for readahead Joanne Koong
2025-09-08 18:51 ` [PATCH v2 16/16] fuse: remove fc->blkbits workaround for partial writes Joanne Koong

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a1529c0f-1f1a-477a-aeeb-a4f108aab26b@linux.alibaba.com \
    --to=hsiangkao@linux.alibaba.com \
    --cc=brauner@kernel.org \
    --cc=djwong@kernel.org \
    --cc=gfs2@lists.linux.dev \
    --cc=hch@infradead.org \
    --cc=joannelkoong@gmail.com \
    --cc=kernel-team@meta.com \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    --cc=miklos@szeredi.hu \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox