From: Yu Kuai <yukuai1@huaweicloud.com>
To: hch@infradead.org, colyli@kernel.org, hare@suse.de,
tieren@fnnas.com, axboe@kernel.dk, tj@kernel.org,
josef@toxicpanda.com, song@kernel.org, yukuai3@huawei.com,
akpm@linux-foundation.org, neil@brown.name
Cc: linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
cgroups@vger.kernel.org, linux-raid@vger.kernel.org,
yukuai1@huaweicloud.com, yi.zhang@huawei.com,
yangerkun@huawei.com, johnny.chenyi@huawei.com
Subject: [PATCH RFC 4/7] md/raid10: convert read/write to use bio_submit_split()
Date: Mon, 25 Aug 2025 17:36:57 +0800
Message-ID: <20250825093700.3731633-5-yukuai1@huaweicloud.com>
In-Reply-To: <20250825093700.3731633-1-yukuai1@huaweicloud.com>
From: Yu Kuai <yukuai3@huawei.com>
On the one hand, unify the bio split code and prepare to fix disordered
split IO; on the other hand, fix the missing blkcg_bio_issue_init() and
trace_block_split() for split IO.
Since bio_submit_split() completes the original bio itself when the split
fails, add a new flag R10BIO_Returned so that raid_end_bio_io() does not
complete the master bio a second time.
Note that discard is not handled, because discard is only split for the
unaligned head and tail.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
drivers/md/raid10.c | 53 ++++++++++++++++++++-------------------------
drivers/md/raid10.h | 1 +
2 files changed, 24 insertions(+), 30 deletions(-)
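For reference, this conversion assumes that bio_submit_split(), exported by
patch 1 of this series, behaves roughly like the sketch below: split off the
head, initialise blkcg and emit the split tracepoint, resubmit the remainder,
and complete the original bio itself on failure. The bio_set parameter and
the exact error handling are assumptions based on the current block-layer
helper, not necessarily the final exported interface:
/*
 * Assumed shape of the exported helper, for illustration only; the real
 * implementation lives in block/blk-merge.c (patch 1).
 */
struct bio *bio_submit_split(struct bio *bio, int split_sectors,
			     struct bio_set *bs)
{
	struct bio *split;
	if (unlikely(split_sectors < 0))
		goto error;
	if (!split_sectors)
		return bio;
	split = bio_split(bio, split_sectors, GFP_NOIO, bs);
	if (IS_ERR(split)) {
		split_sectors = PTR_ERR(split);
		goto error;
	}
	/* the calls the open-coded md splits were missing */
	split->bi_opf |= REQ_NOMERGE;
	blkcg_bio_issue_init(split);
	bio_chain(split, bio);
	trace_block_split(split, bio->bi_iter.bi_sector);
	/* resubmit the tail, hand the head back to the caller */
	submit_bio_noacct(bio);
	return split;
error:
	/*
	 * The original bio is completed here with the error, which is why
	 * the raid10 callers only set R10BIO_Returned and let
	 * raid_end_bio_io() skip bio_endio() for it.
	 */
	bio->bi_status = errno_to_blk_status(split_sectors);
	bio_endio(bio);
	return NULL;
}
If the helper marks the returned head REQ_NOMERGE as above, that would also
explain why the callers below clear REQ_NOMERGE again before reusing the
returned bio as the new master_bio.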
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b60c30bfb6c7..b8777661307b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -322,10 +322,12 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
struct bio *bio = r10_bio->master_bio;
struct r10conf *conf = r10_bio->mddev->private;
- if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
- bio->bi_status = BLK_STS_IOERR;
+ if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ }
- bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -1154,7 +1156,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
- int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1203,17 +1204,16 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rdev->bdev,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split(bio, max_sectors, &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
+ bio->bi_opf &= ~REQ_NOMERGE;
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
}
@@ -1239,10 +1239,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
mddev_trace_remap(mddev, read_bio, r10_bio->sector);
submit_bio_noacct(read_bio);
return;
+
err_handle:
atomic_dec(&rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
@@ -1351,7 +1350,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
int i, k;
sector_t sectors;
int max_sectors;
- int error;
if ((mddev_is_clustered(mddev) &&
mddev->cluster_ops->area_resyncing(mddev, WRITE,
@@ -1465,10 +1463,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - dev_sector;
if (good_sectors < max_sectors)
@@ -1489,17 +1485,16 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->sectors = max_sectors;
if (r10_bio->sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, r10_bio->sectors,
- GFP_NOIO, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split(bio, r10_bio->sectors, &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
+ bio->bi_opf &= ~REQ_NOMERGE;
r10_bio->master_bio = bio;
}
@@ -1531,8 +1526,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 3f16ad6904a9..cc167e708125 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -165,6 +165,7 @@ enum r10bio_state {
* so that raid10d knows what to do with them.
*/
R10BIO_ReadError,
+ R10BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
--
2.39.2