From: Song Liu <song@kernel.org>
To: Vishal Verma <vverma@digitalocean.com>
Cc: linux-raid <linux-raid@vger.kernel.org>,
Jens Axboe <axboe@kernel.dk>,
rgoldwyn@suse.de
Subject: Re: [PATCH v5 2/4] md: raid1 add nowait support
Date: Wed, 15 Dec 2021 12:33:38 -0800 [thread overview]
Message-ID: <CAPhsuW5vrMcyMHXNyyFUkzdHnWAGs+WUSLwkrrpyt81Bu3ap2g@mail.gmail.com> (raw)
In-Reply-To: <20211215060906.3230-2-vverma@digitalocean.com>
On Tue, Dec 14, 2021 at 10:09 PM Vishal Verma <vverma@digitalocean.com> wrote:
>
> This adds nowait support to the RAID1 driver. It makes RAID1 driver
> return with EAGAIN for situations where it could wait for eg:
>
> - Waiting for the barrier,
> - Array got frozen,
> - Too many pending I/Os to be queued.
>
> wait_barrier() fn is modified to return bool to support error for
> wait barriers. It returns true in case of wait or if wait is not
> required and returns false if wait was required but not performed
> to support nowait.
Please see some detailed comments below. But a general and more important
question: were you able to trigger these conditions (paths that lead to
bio_wouldblock_error) in the tests?
Ideally, we should test all these conditions. If something is really
hard to trigger,
please highlight that in the commit log, so that I can run more tests on it.
Thanks,
Song
>
> Signed-off-by: Vishal Verma <vverma@digitalocean.com>
> ---
> drivers/md/raid1.c | 74 +++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 57 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 7dc8026cf6ee..727d31de5694 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -929,8 +929,9 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
> wake_up(&conf->wait_barrier);
> }
>
> -static void _wait_barrier(struct r1conf *conf, int idx)
> +static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
> {
> + bool ret = true;
> /*
> * We need to increase conf->nr_pending[idx] very early here,
> * then raise_barrier() can be blocked when it waits for
> @@ -961,7 +962,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
> */
> if (!READ_ONCE(conf->array_frozen) &&
> !atomic_read(&conf->barrier[idx]))
> - return;
> + return ret;
>
> /*
> * After holding conf->resync_lock, conf->nr_pending[idx]
> @@ -979,18 +980,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
> */
> wake_up(&conf->wait_barrier);
> /* Wait for the barrier in same barrier unit bucket to drop. */
> - wait_event_lock_irq(conf->wait_barrier,
> - !conf->array_frozen &&
> - !atomic_read(&conf->barrier[idx]),
> - conf->resync_lock);
> + if (conf->array_frozen || atomic_read(&conf->barrier[idx])) {
Do we really need this check?
> + /* Return false when nowait flag is set */
> + if (nowait)
> + ret = false;
> + else {
> + wait_event_lock_irq(conf->wait_barrier,
> + !conf->array_frozen &&
> + !atomic_read(&conf->barrier[idx]),
> + conf->resync_lock);
> + }
> + }
> atomic_inc(&conf->nr_pending[idx]);
Were you able to trigger this condition in the tests? I think we should
only increase nr_pending when ret == true; otherwise we will leak an
nr_pending count.
> atomic_dec(&conf->nr_waiting[idx]);
> spin_unlock_irq(&conf->resync_lock);
> + return ret;
> }
>
> -static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
> +static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
> {
> int idx = sector_to_idx(sector_nr);
> + bool ret = true;
>
> /*
> * Very similar to _wait_barrier(). The difference is, for read
> @@ -1002,7 +1012,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
> atomic_inc(&conf->nr_pending[idx]);
>
> if (!READ_ONCE(conf->array_frozen))
> - return;
> + return ret;
>
> spin_lock_irq(&conf->resync_lock);
> atomic_inc(&conf->nr_waiting[idx]);
> @@ -1013,19 +1023,27 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
> */
> wake_up(&conf->wait_barrier);
> /* Wait for array to be unfrozen */
> - wait_event_lock_irq(conf->wait_barrier,
> - !conf->array_frozen,
> - conf->resync_lock);
> + if (conf->array_frozen || atomic_read(&conf->barrier[idx])) {
I guess we don't need this either. Also, the condition there is not identical
to wait_barrier (no need to check conf->barrier[idx]).
> + if (nowait)
> + /* Return false when nowait flag is set */
> + ret = false;
> + else {
> + wait_event_lock_irq(conf->wait_barrier,
> + !conf->array_frozen,
> + conf->resync_lock);
> + }
> + }
> atomic_inc(&conf->nr_pending[idx]);
ditto on nr_pending.
> atomic_dec(&conf->nr_waiting[idx]);
> spin_unlock_irq(&conf->resync_lock);
> + return ret;
> }
>
> -static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
> +static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
> {
> int idx = sector_to_idx(sector_nr);
>
> - _wait_barrier(conf, idx);
> + return _wait_barrier(conf, idx, nowait);
> }
>
> static void _allow_barrier(struct r1conf *conf, int idx)
> @@ -1236,7 +1254,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
> * Still need barrier for READ in case that whole
> * array is frozen.
> */
> - wait_read_barrier(conf, bio->bi_iter.bi_sector);
> + if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
> + bio->bi_opf & REQ_NOWAIT)) {
> + bio_wouldblock_error(bio);
> + return;
> + }
>
> if (!r1_bio)
> r1_bio = alloc_r1bio(mddev, bio);
> @@ -1336,6 +1358,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> bio->bi_iter.bi_sector, bio_end_sector(bio))) {
>
> DEFINE_WAIT(w);
> + if (bio->bi_opf & REQ_NOWAIT) {
> + bio_wouldblock_error(bio);
> + return;
> + }
> for (;;) {
> prepare_to_wait(&conf->wait_barrier,
> &w, TASK_IDLE);
> @@ -1353,17 +1379,26 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> * thread has put up a bar for new requests.
> * Continue immediately if no resync is active currently.
> */
> - wait_barrier(conf, bio->bi_iter.bi_sector);
> + if (!wait_barrier(conf, bio->bi_iter.bi_sector,
> + bio->bi_opf & REQ_NOWAIT)) {
> + bio_wouldblock_error(bio);
> + return;
> + }
>
> r1_bio = alloc_r1bio(mddev, bio);
> r1_bio->sectors = max_write_sectors;
>
> if (conf->pending_count >= max_queued_requests) {
> md_wakeup_thread(mddev->thread);
> + if (bio->bi_opf & REQ_NOWAIT) {
> + bio_wouldblock_error(bio);
I think we need to fix conf->nr_pending before returning.
> + return;
> + }
> raid1_log(mddev, "wait queued");
> wait_event(conf->wait_barrier,
> conf->pending_count < max_queued_requests);
> }
> +
> /* first select target devices under rcu_lock and
> * inc refcount on their rdev. Record them by setting
> * bios[x] to bio
> @@ -1458,9 +1493,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> rdev_dec_pending(conf->mirrors[j].rdev, mddev);
> r1_bio->state = 0;
> allow_barrier(conf, bio->bi_iter.bi_sector);
> +
> + if (bio->bi_opf & REQ_NOWAIT) {
> + bio_wouldblock_error(bio);
> + return;
> + }
> raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
> md_wait_for_blocked_rdev(blocked_rdev, mddev);
> - wait_barrier(conf, bio->bi_iter.bi_sector);
> + wait_barrier(conf, bio->bi_iter.bi_sector, false);
> goto retry_write;
> }
>
> @@ -1687,7 +1727,7 @@ static void close_sync(struct r1conf *conf)
> int idx;
>
> for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
> - _wait_barrier(conf, idx);
> + _wait_barrier(conf, idx, false);
> _allow_barrier(conf, idx);
> }
>
> --
> 2.17.1
>
next prev parent reply other threads:[~2021-12-15 20:33 UTC|newest]
Thread overview: 86+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-01 21:51 [PATCH] md: add support for REQ_NOWAIT Vishal Verma
2021-11-02 3:41 ` Li Feng
2021-11-02 5:01 ` Song Liu
2021-11-02 14:40 ` [PATCH v2] " Vishal Verma
2021-11-02 15:31 ` Jens Axboe
2021-11-02 18:35 ` Song Liu
2021-11-04 4:51 ` [PATCH v3 2/2] md: raid1 add nowait support Vishal Verma
2021-11-04 4:51 ` [PATCH v3 1/2] md: add support for REQ_NOWAIT Vishal Verma
2021-11-06 15:38 ` Guoqing Jiang
2021-11-07 0:16 ` Vishal Verma
2021-11-08 22:17 ` Song Liu
2021-11-08 22:36 ` Vishal Verma
2021-11-06 15:24 ` [PATCH v3 2/2] md: raid1 add nowait support Guoqing Jiang
2021-11-07 0:18 ` Vishal Verma
2021-11-08 22:32 ` Song Liu
2021-11-08 22:39 ` Vishal Verma
2021-11-09 20:59 ` Vishal Verma
2021-11-10 17:02 ` Song Liu
2021-11-10 17:04 ` Vishal Verma
2021-11-10 18:14 ` [RFC PATCH v4 1/4] md: add support for REQ_NOWAIT Vishal Verma
2021-11-10 18:14 ` [RFC PATCH v4 2/4] md: raid1 add nowait support Vishal Verma
2021-11-10 18:14 ` [RFC PATCH v4 3/4] md: raid10 " Vishal Verma
2021-12-14 0:32 ` Song Liu
2021-12-14 15:27 ` Vishal Verma
2021-11-10 18:14 ` [RFC PATCH v4 4/4] md: raid456 " Vishal Verma
2021-11-11 21:42 ` Song Liu
[not found] ` <f8c2a2bc-a885-8254-2b39-fc0c969ac70d@digitalocean.com>
2021-11-19 4:07 ` Song Liu
2021-11-19 4:20 ` Vishal Verma
2021-12-09 16:53 ` Vishal Verma
2021-12-09 16:59 ` Song Liu
2021-12-09 17:01 ` Vishal Verma
2021-12-10 2:16 ` Song Liu
2021-12-10 7:18 ` Song Liu
2021-12-10 18:26 ` Vishal Verma
2021-12-13 5:56 ` Song Liu
2021-12-13 22:43 ` Vishal Verma
2021-12-13 23:35 ` Jens Axboe
[not found] ` <78d5f029-791e-6d3f-4871-263ec6b5c09b@digitalocean.com>
2021-12-14 1:11 ` Song Liu
2021-12-14 1:12 ` Vishal Verma
2021-12-14 15:30 ` Vishal Verma
2021-12-14 17:08 ` Song Liu
2021-12-14 18:09 ` Vishal Verma
2021-12-15 6:09 ` [PATCH v5 1/4] md: add support for REQ_NOWAIT Vishal Verma
2021-12-15 6:09 ` [PATCH v5 2/4] md: raid1 add nowait support Vishal Verma
2021-12-15 20:33 ` Song Liu [this message]
2021-12-15 22:20 ` Vishal Verma
2021-12-21 20:06 ` [PATCH v6 1/4] md: add support for REQ_NOWAIT Vishal Verma
2021-12-21 20:06 ` [PATCH v6 2/4] md: raid1 add nowait support Vishal Verma
2021-12-21 20:06 ` [PATCH v6 3/4] md: raid10 " Vishal Verma
2021-12-22 23:58 ` Song Liu
2021-12-23 1:47 ` Song Liu
2021-12-21 20:06 ` [PATCH v6 4/4] md: raid456 " Vishal Verma
2021-12-21 22:02 ` John Stoffel
2021-12-25 2:14 ` Song Liu
[not found] ` <aadc6d52-bc6e-527a-3b9c-0be225f9b727@digitalocean.com>
2021-12-25 22:13 ` Vishal Verma
2021-12-26 0:07 ` Song Liu
2021-12-26 4:02 ` Vishal Verma
2021-12-26 21:20 ` Vishal Verma
2021-12-22 16:06 ` [PATCH v6 1/4] md: add support for REQ_NOWAIT Jens Axboe
2021-12-23 1:22 ` Song Liu
2021-12-23 2:57 ` Song Liu
2021-12-23 3:08 ` Vishal Verma
2022-01-02 0:11 ` Song Liu
2022-01-02 2:08 ` Vishal Verma
2021-12-23 8:36 ` Christoph Hellwig
2021-12-15 6:09 ` [PATCH v5 3/4] md: raid10 add nowait support Vishal Verma
2021-12-15 20:42 ` Song Liu
2021-12-15 22:20 ` Vishal Verma
2021-12-16 0:30 ` Vishal Verma
2021-12-16 16:40 ` Vishal Verma
2021-12-16 16:42 ` Jens Axboe
2021-12-16 16:45 ` Vishal Verma
2021-12-16 18:49 ` Jens Axboe
2021-12-16 19:40 ` Vishal Verma
2021-12-16 20:18 ` Song Liu
2021-12-16 20:37 ` Vishal Verma
2021-12-16 23:50 ` Song Liu
[not found] ` <bd90d6e6-adb4-2696-3110-fad0b1ee00dc@digitalocean.com>
2021-12-21 8:13 ` Song Liu
2021-12-21 15:29 ` Vishal Verma
2021-12-21 15:59 ` Jens Axboe
2021-12-21 16:26 ` Vishal Verma
2021-12-16 18:14 ` Vishal Verma
2021-12-15 6:09 ` [PATCH v5 4/4] md: raid456 " Vishal Verma
2021-12-15 20:02 ` [PATCH v5 1/4] md: add support for REQ_NOWAIT Song Liu
2021-12-14 0:36 ` [RFC PATCH v4 4/4] md: raid456 add nowait support Song Liu
2021-12-13 23:50 ` [RFC PATCH v4 1/4] md: add support for REQ_NOWAIT Song Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=CAPhsuW5vrMcyMHXNyyFUkzdHnWAGs+WUSLwkrrpyt81Bu3ap2g@mail.gmail.com \
--to=song@kernel.org \
--cc=axboe@kernel.dk \
--cc=linux-raid@vger.kernel.org \
--cc=rgoldwyn@suse.de \
--cc=vverma@digitalocean.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).