From: Xiao Ni <xni@redhat.com>
To: song@kernel.org
Cc: linux-raid@vger.kernel.org, ming.lei@redhat.com,
ncroxon@redhat.com, heinzm@redhat.com
Subject: [PATCH V2 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce
Date: Wed, 1 Feb 2023 09:07:12 +0800 [thread overview]
Message-ID: <20230201010712.97929-1-xni@redhat.com> (raw)
It has added io_acct_set for raid0/raid5 io accounting and it needs to
alloc md_io_acct in the i/o path. They are freed when the bios come back
from member disks. Currently we don't have a method to monitor whether all
of those bios have come back. In the takeover process, it needs to free the raid0
memory resource including the memory pool for md_io_acct. But maybe some
bios are still not returned. When those bios return later, they can cause a
panic because of dereferencing a NULL pointer or an invalid address.
This patch adds io_acct_cnt. So when stopping raid0, it can use this
to wait until all bios come back. And I did a simple performance test
with fio:
-direct=1 -ioengine=libaio -iodepth=128 -bs=64K -rw=write -numjobs=1
With the patch set: 2676MB/s, without the patch set: 2670MB/s
-direct=1 -ioengine=libaio -iodepth=128 -bs=64K -rw=read -numjobs=1
With the patch set: 4676MB/s, without the patch set: 4654MB/s
Reported-by: Fine Fan <ffan@redhat.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Xiao Ni <xni@redhat.com>
---
v2: Fixes a bug. It needs to check whether io_acct_cnt is in a dead state
when resurrecting
drivers/md/md.c | 23 ++++++++++++++++++++++-
drivers/md/md.h | 9 ++++++---
drivers/md/raid0.c | 6 ++++++
3 files changed, 34 insertions(+), 4 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0eb31bef1f01..66c3639bdbfd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -683,6 +683,7 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
+ init_waitqueue_head(&mddev->wait_io_acct);
mddev->reshape_position = MaxSector;
mddev->reshape_backwards = 0;
mddev->last_sync_action = "none";
@@ -8604,13 +8605,28 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+static void io_acct_release(struct percpu_ref *ref)
+{
+ struct mddev *mddev = container_of(ref, struct mddev, io_acct_cnt);
+
+ wake_up(&mddev->wait_io_acct);
+}
+
int acct_bioset_init(struct mddev *mddev)
{
int err = 0;
- if (!bioset_initialized(&mddev->io_acct_set))
+ if (!bioset_initialized(&mddev->io_acct_set)) {
+ err = percpu_ref_init(&mddev->io_acct_cnt, io_acct_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+ if (err)
+ return err;
+
err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
offsetof(struct md_io_acct, bio_clone), 0);
+ if (err)
+ percpu_ref_exit(&mddev->io_acct_cnt);
+ }
return err;
}
EXPORT_SYMBOL_GPL(acct_bioset_init);
@@ -8618,6 +8634,7 @@ EXPORT_SYMBOL_GPL(acct_bioset_init);
void acct_bioset_exit(struct mddev *mddev)
{
bioset_exit(&mddev->io_acct_set);
+ percpu_ref_exit(&mddev->io_acct_cnt);
}
EXPORT_SYMBOL_GPL(acct_bioset_exit);
@@ -8625,9 +8642,11 @@ static void md_end_io_acct(struct bio *bio)
{
struct md_io_acct *md_io_acct = bio->bi_private;
struct bio *orig_bio = md_io_acct->orig_bio;
+ struct mddev *mddev = md_io_acct->mddev;
orig_bio->bi_status = bio->bi_status;
+ percpu_ref_put(&mddev->io_acct_cnt);
bio_end_io_acct(orig_bio, md_io_acct->start_time);
bio_put(bio);
bio_endio(orig_bio);
@@ -8650,6 +8669,8 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
md_io_acct->orig_bio = *bio;
md_io_acct->start_time = bio_start_io_acct(*bio);
+ md_io_acct->mddev = mddev;
+ percpu_ref_get(&mddev->io_acct_cnt);
clone->bi_end_io = md_end_io_acct;
clone->bi_private = md_io_acct;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 6335cb86e52e..c0e869bdde42 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -513,6 +513,8 @@ struct mddev {
* metadata and bitmap writes
*/
struct bio_set io_acct_set; /* for raid0 and raid5 io accounting */
+ struct percpu_ref io_acct_cnt;
+ wait_queue_head_t wait_io_acct;
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
@@ -710,9 +712,10 @@ struct md_thread {
};
struct md_io_acct {
- struct bio *orig_bio;
- unsigned long start_time;
- struct bio bio_clone;
+ struct mddev *mddev;
+ struct bio *orig_bio;
+ unsigned long start_time;
+ struct bio bio_clone;
};
#define THREAD_WAKEUP 0
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b536befd8898..f732ca8e2e1f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -753,6 +753,12 @@ static void *raid0_takeover(struct mddev *mddev)
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
+ if (quiesce) {
+ percpu_ref_kill(&mddev->io_acct_cnt);
+ wait_event(mddev->wait_io_acct,
+ percpu_ref_is_zero(&mddev->io_acct_cnt));
+ } else if (percpu_ref_is_dying(&mddev->io_acct_cnt))
+ percpu_ref_resurrect(&mddev->io_acct_cnt);
}
static struct md_personality raid0_personality=
--
2.32.0 (Apple Git-132)
next reply other threads:[~2023-02-01 1:08 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-01 1:07 Xiao Ni [this message]
2023-02-01 7:21 ` [PATCH V2 1/1] md/raid0: Add mddev->io_acct_cnt for raid0_quiesce Song Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230201010712.97929-1-xni@redhat.com \
--to=xni@redhat.com \
--cc=heinzm@redhat.com \
--cc=linux-raid@vger.kernel.org \
--cc=ming.lei@redhat.com \
--cc=ncroxon@redhat.com \
--cc=song@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).