From: Yu Kuai <yukuai1@huaweicloud.com>
To: hch@infradead.org, colyli@kernel.org, hare@suse.de,
tieren@fnnas.com, axboe@kernel.dk, tj@kernel.org,
josef@toxicpanda.com, song@kernel.org, yukuai3@huawei.com,
akpm@linux-foundation.org, neil@brown.name
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, linux-raid@vger.kernel.org,
yukuai1@huaweicloud.com, yi.zhang@huawei.com,
yangerkun@huawei.com, johnny.chenyi@huawei.com
Subject: [PATCH RFC 1/7] block: export helper bio_submit_split()
Date: Mon, 25 Aug 2025 17:36:54 +0800 [thread overview]
Message-ID: <20250825093700.3731633-2-yukuai1@huaweicloud.com> (raw)
In-Reply-To: <20250825093700.3731633-1-yukuai1@huaweicloud.com>
From: Yu Kuai <yukuai3@huawei.com>
No functional changes are intended. Some drivers, such as mdraid, split
bios during internal processing; this prepares for unifying the bio split code.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
block/blk-merge.c | 60 +++++++++++++++++++++++++++++----------------
include/linux/bio.h | 2 ++
2 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 70d704615be5..c45d5e43e172 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -104,35 +104,49 @@ static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
-static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
+/**
+ * bio_submit_split - Submit a bio, splitting it at a designated sector
+ * @bio: the original bio to be submitted and split
+ * @split_sectors: the sector count (from the start of @bio) at which to split
+ * @bs: the bio set used for allocating the new split bio
+ *
+ * The original bio is modified to contain the remaining sectors and submitted.
+ * The caller is responsible for submitting the returned bio.
+ *
+ * On success, the newly allocated bio representing the initial part is
+ * returned; on failure, NULL is returned and the original bio is failed.
+ */
+struct bio *bio_submit_split(struct bio *bio, int split_sectors,
+ struct bio_set *bs)
{
+ struct bio *split;
+
if (unlikely(split_sectors < 0))
goto error;
- if (split_sectors) {
- struct bio *split;
+ if (!split_sectors)
+ return bio;
- split = bio_split(bio, split_sectors, GFP_NOIO,
- &bio->bi_bdev->bd_disk->bio_split);
- if (IS_ERR(split)) {
- split_sectors = PTR_ERR(split);
- goto error;
- }
- split->bi_opf |= REQ_NOMERGE;
- blkcg_bio_issue_init(split);
- bio_chain(split, bio);
- trace_block_split(split, bio->bi_iter.bi_sector);
- WARN_ON_ONCE(bio_zone_write_plugging(bio));
- submit_bio_noacct(bio);
- return split;
+ split = bio_split(bio, split_sectors, GFP_NOIO, bs);
+ if (IS_ERR(split)) {
+ split_sectors = PTR_ERR(split);
+ goto error;
}
- return bio;
+ split->bi_opf |= REQ_NOMERGE;
+ blkcg_bio_issue_init(split);
+ bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
+ WARN_ON_ONCE(bio_zone_write_plugging(bio));
+ submit_bio_noacct(bio);
+ return split;
+
error:
bio->bi_status = errno_to_blk_status(split_sectors);
bio_endio(bio);
return NULL;
}
+EXPORT_SYMBOL_GPL(bio_submit_split);
struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
unsigned *nsegs)
@@ -167,7 +181,8 @@ struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
if (split_sectors > tmp)
split_sectors -= tmp;
- return bio_submit_split(bio, split_sectors);
+ return bio_submit_split(bio, split_sectors,
+ &bio->bi_bdev->bd_disk->bio_split);
}
static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
@@ -357,7 +372,8 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
{
return bio_submit_split(bio,
bio_split_rw_at(bio, lim, nr_segs,
- get_max_io_size(bio, lim) << SECTOR_SHIFT));
+ get_max_io_size(bio, lim) << SECTOR_SHIFT),
+ &bio->bi_bdev->bd_disk->bio_split);
}
/*
@@ -376,7 +392,8 @@ struct bio *bio_split_zone_append(struct bio *bio,
lim->max_zone_append_sectors << SECTOR_SHIFT);
if (WARN_ON_ONCE(split_sectors > 0))
split_sectors = -EINVAL;
- return bio_submit_split(bio, split_sectors);
+ return bio_submit_split(bio, split_sectors,
+ &bio->bi_bdev->bd_disk->bio_split);
}
struct bio *bio_split_write_zeroes(struct bio *bio,
@@ -396,7 +413,8 @@ struct bio *bio_split_write_zeroes(struct bio *bio,
return bio;
if (bio_sectors(bio) <= max_sectors)
return bio;
- return bio_submit_split(bio, max_sectors);
+ return bio_submit_split(bio, max_sectors,
+ &bio->bi_bdev->bd_disk->bio_split);
}
/**
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 46ffac5caab7..2233261be5e8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -324,6 +324,8 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
unsigned *segs, unsigned max_bytes);
+struct bio *bio_submit_split(struct bio *bio, int split_sectors,
+ struct bio_set *bs);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
--
2.39.2
next prev parent reply other threads:[~2025-08-25 9:45 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-08-25 9:36 [PATCH RFC 0/7] block: fix disordered IO in the case recursive split Yu Kuai
2025-08-25 9:36 ` Yu Kuai [this message]
2025-08-25 10:53 ` [PATCH RFC 1/7] block: export helper bio_submit_split() Christoph Hellwig
2025-08-26 0:51 ` Yu Kuai
2025-08-25 9:36 ` [PATCH RFC 2/7] md/raid0: convert raid0_handle_discard() to use bio_submit_split() Yu Kuai
2025-08-25 10:57 ` Christoph Hellwig
2025-08-26 1:08 ` Yu Kuai
2025-08-26 7:54 ` Christoph Hellwig
2025-08-26 9:11 ` Yu Kuai
2025-08-25 9:36 ` [PATCH RFC 3/7] md/raid1: convert " Yu Kuai
2025-08-25 10:57 ` Christoph Hellwig
2025-08-26 1:09 ` Yu Kuai
2025-08-25 9:36 ` [PATCH RFC 4/7] md/raid10: convert read/write " Yu Kuai
2025-08-25 10:59 ` Christoph Hellwig
2025-08-26 1:13 ` Yu Kuai
2025-08-26 7:55 ` Christoph Hellwig
2025-08-26 9:14 ` Yu Kuai
2025-08-26 17:35 ` anthony
2025-08-27 7:31 ` Christoph Hellwig
2025-09-02 6:18 ` John Garry
2025-09-02 6:30 ` Christoph Hellwig
2025-09-02 6:58 ` John Garry
2025-09-02 8:25 ` Yu Kuai
2025-09-02 14:46 ` John Garry
2025-08-25 9:36 ` [PATCH RFC 5/7] md/raid5: convert " Yu Kuai
2025-08-25 11:00 ` Christoph Hellwig
2025-08-26 1:15 ` Yu Kuai
2025-08-26 7:56 ` Christoph Hellwig
2025-08-25 9:36 ` [PATCH RFC 6/7] md/md-linear: " Yu Kuai
2025-08-25 9:37 ` [PATCH RFC 7/7] block: fix disordered IO in the case recursive split Yu Kuai
2025-08-25 11:07 ` Christoph Hellwig
2025-08-26 1:20 ` Yu Kuai
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250825093700.3731633-2-yukuai1@huaweicloud.com \
--to=yukuai1@huaweicloud.com \
--cc=akpm@linux-foundation.org \
--cc=axboe@kernel.dk \
--cc=cgroups@vger.kernel.org \
--cc=colyli@kernel.org \
--cc=hare@suse.de \
--cc=hch@infradead.org \
--cc=johnny.chenyi@huawei.com \
--cc=josef@toxicpanda.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-raid@vger.kernel.org \
--cc=neil@brown.name \
--cc=song@kernel.org \
--cc=tieren@fnnas.com \
--cc=tj@kernel.org \
--cc=yangerkun@huawei.com \
--cc=yi.zhang@huawei.com \
--cc=yukuai3@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).