* Re: dm: Respect REQ_NOWAIT bios [not found] <15ca26cc-174a-d4e8-9780-d09f8e5a6ea5@redhat.com> @ 2023-10-24 17:58 ` Mike Snitzer 2023-10-24 19:18 ` Mikulas Patocka 0 siblings, 1 reply; 7+ messages in thread From: Mike Snitzer @ 2023-10-24 17:58 UTC (permalink / raw) To: Mikulas Patocka, Ming Lei; +Cc: dm-devel, linux-block, axboe For the benefit of others, since this patch was posted to the old dm-devel list, here is the original patch proposal: https://patchwork.kernel.org/project/dm-devel/patch/15ca26cc-174a-d4e8-9780-d09f8e5a6ea5@redhat.com/ On Tue, Oct 03 2023 at 11:30P -0400, Mikulas Patocka <mpatocka@redhat.com> wrote: > Hi > > Here I'm sending that patch for REQ_NOWAIT for review. > > It is not tested, except for some trivial tests that involve logical > volume activation. At a minimum we need to test with the simple test code Jens provided in commit a9ce385344f9 ("dm: don't attempt to queue IO under RCU protection") > I found out that it seems easier to propagate the error using bits in > clone_info rather than changing return codes for each affected function. Yes, the various DM core code that allocates multiple bios, etc certainly benefits from not having to worry about tracking fine-grained return codes. So I've kept the flag in clone_info. > > Mikulas > > > Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Please don't add Signed-off-by to anything you don't want to see going upstream (yet), especially something you haven't tested. Even going so far as adding 'Not-Signed-off-by' is appropriate. More comments inline below, and then at the end I'm attaching an incremental patch that should get us closer to something that works (but I haven't tested yet either). I've also messaged Ming Lei to check with him on the FIXME I added before the call to dm_io_rewind() -- hopefully Ming will reply to this email. 
> --- > drivers/md/dm.c | 51 +++++++++++++++++++++++++++++++++++++++++++++------ > 1 file changed, 45 insertions(+), 6 deletions(-) > > Index: linux-2.6/drivers/md/dm.c > =================================================================== > --- linux-2.6.orig/drivers/md/dm.c > +++ linux-2.6/drivers/md/dm.c > @@ -87,6 +87,8 @@ struct clone_info { > unsigned int sector_count; > bool is_abnormal_io:1; > bool submit_as_polled:1; > + bool is_nowait:1; > + bool nowait_failed:1; > }; > > static inline struct dm_target_io *clone_to_tio(struct bio *clone) > @@ -570,13 +572,21 @@ static void dm_end_io_acct(struct dm_io > dm_io_acct(io, true); > } > > -static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) > +static struct dm_io *alloc_io(struct clone_info *ci, struct mapped_device *md, struct bio *bio) > { > struct dm_io *io; > struct dm_target_io *tio; > struct bio *clone; > > - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); > + if (unlikely(ci->is_nowait)) { > + clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); > + if (!clone) { > + ci->nowait_failed = true; > + return NULL; > + } > + } else { > + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); > + } > tio = clone_to_tio(clone); > tio->flags = 0; > dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); The above is bogus because if the clone cannot be allocated then the returned NULL dm_io struct will cause NULL pointer dereference during error handling at the end of dm_split_and_process_bio() -- my patch attached below addresses this. 
> @@ -1503,6 +1513,11 @@ static void alloc_multiple_bios(struct b > > while ((bio = bio_list_pop(blist))) > free_tio(bio); > + > + if (ci->is_nowait) { > + ci->nowait_failed = true; > + return; > + } > } > } > > @@ -1519,7 +1534,15 @@ static unsigned int __send_duplicate_bio > case 1: > if (len) > setup_split_accounting(ci, *len); > - clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); > + if (unlikely(ci->is_nowait)) { > + clone = alloc_tio(ci, ti, 0, len, GFP_NOWAIT); > + if (!clone) { > + ci->nowait_failed = true; > + return 0; > + } > + } else { > + clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); > + } > __map_bio(clone); > ret = 1; > break; > @@ -1656,7 +1679,7 @@ static blk_status_t __process_abnormal_i > > __send_changing_extent_only(ci, ti, num_bios, > max_granularity, max_sectors); > - return BLK_STS_OK; > + return likely(!ci->nowait_failed) ? BLK_STS_OK : BLK_STS_AGAIN; > } > > /* > @@ -1729,7 +1752,15 @@ static blk_status_t __split_and_process_ > > len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); > setup_split_accounting(ci, len); > - clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); > + if (unlikely(ci->is_nowait)) { > + clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); > + if (unlikely(!clone)) { > + ci->nowait_failed = true; > + return BLK_STS_AGAIN; > + } > + } else { > + clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); > + } > __map_bio(clone); > > ci->sector += len; > @@ -1741,8 +1772,10 @@ static blk_status_t __split_and_process_ > static void init_clone_info(struct clone_info *ci, struct mapped_device *md, > struct dm_table *map, struct bio *bio, bool is_abnormal) > { > + ci->is_nowait = !!(bio->bi_opf & REQ_NOWAIT); > + ci->nowait_failed = false; > ci->map = map; > - ci->io = alloc_io(md, bio); > + ci->io = alloc_io(ci, md, bio); > ci->bio = bio; > ci->is_abnormal_io = is_abnormal; > ci->submit_as_polled = false; > @@ -1778,10 +1811,16 @@ static void dm_split_and_process_bio(str > } > > init_clone_info(&ci, md, map, bio, is_abnormal); > + if 
(unlikely(ci.nowait_failed)) { > + error = BLK_STS_AGAIN; > + goto out; > + } > io = ci.io; > > if (bio->bi_opf & REQ_PREFLUSH) { > __send_empty_flush(&ci); > + if (unlikely(ci.nowait_failed)) > + error = BLK_STS_AGAIN; > /* dm_io_complete submits any data associated with flush */ > goto out; > } > Below you'll see I insulated dm_split_and_process_bio() from ever checking ci.nowait_failed -- prefer methods like __send_empty_flush that are called from dm_split_and_process_bio() return blk_status_t (like __process_abnormal_io and __split_and_process_bio do). diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d6fbbaa7600b..2a9ff269c28b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -572,18 +572,16 @@ static void dm_end_io_acct(struct dm_io *io) dm_io_acct(io, true); } -static struct dm_io *alloc_io(struct clone_info *ci, struct mapped_device *md, struct bio *bio) +static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) { struct dm_io *io; struct dm_target_io *tio; struct bio *clone; - if (unlikely(ci->is_nowait)) { + if (unlikely(bio->bi_opf & REQ_NOWAIT)) { clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); - if (!clone) { - ci->nowait_failed = true; - return NULL; - } + if (!clone) + return PTR_ERR(-EAGAIN); } else { clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); } @@ -1001,6 +999,8 @@ static void dm_wq_requeue_work(struct work_struct *work) while (io) { struct dm_io *next = io->next; + // FIXME: if io->orig_bio has REQ_NOWAIT set should GFP_NOWAIT be used? + // requeue performed from completion path so REQ_NOWAIT can be safely dropped? 
dm_io_rewind(io, &md->disk->bio_split); io->next = NULL; @@ -1565,7 +1565,7 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe return ret; } -static void __send_empty_flush(struct clone_info *ci) +static blk_status_t __send_empty_flush(struct clone_info *ci) { struct dm_table *t = ci->map; struct bio flush_bio; @@ -1598,6 +1598,8 @@ static void __send_empty_flush(struct clone_info *ci) atomic_sub(1, &ci->io->io_count); bio_uninit(ci->bio); + + return likely(!ci->nowait_failed) ? BLK_STS_OK : BLK_STS_AGAIN; } static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, @@ -1758,7 +1760,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) if (unlikely(ci->is_nowait)) { clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); if (unlikely(!clone)) { - ci->nowait_failed = true; + ci->nowait_failed = true; /* unchecked, set for consistency */ return BLK_STS_AGAIN; } } else { @@ -1772,13 +1774,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) return BLK_STS_OK; } -static void init_clone_info(struct clone_info *ci, struct mapped_device *md, +static void init_clone_info(struct clone_info *ci, struct dm_io *io, struct dm_table *map, struct bio *bio, bool is_abnormal) { ci->is_nowait = !!(bio->bi_opf & REQ_NOWAIT); ci->nowait_failed = false; ci->map = map; - ci->io = alloc_io(ci, md, bio); + ci->io = io; ci->bio = bio; ci->is_abnormal_io = is_abnormal; ci->submit_as_polled = false; @@ -1813,17 +1815,17 @@ static void dm_split_and_process_bio(struct mapped_device *md, return; } - init_clone_info(&ci, md, map, bio, is_abnormal); - if (unlikely(ci.nowait_failed)) { - error = BLK_STS_AGAIN; - goto out; + io = alloc_io(md, bio); + if (unlikely(IS_ERR(io) && ERR_PTR(io) == -EAGAIN)) { + /* Unable to do anything without dm_io. 
*/ + bio_wouldblock_error(bio); + return; } - io = ci.io; + + init_clone_info(&ci, io, map, bio, is_abnormal); if (bio->bi_opf & REQ_PREFLUSH) { - __send_empty_flush(&ci); - if (unlikely(ci.nowait_failed)) - error = BLK_STS_AGAIN; + error = __send_empty_flush(&ci); /* dm_io_complete submits any data associated with flush */ goto out; } ^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: dm: Respect REQ_NOWAIT bios 2023-10-24 17:58 ` dm: Respect REQ_NOWAIT bios Mike Snitzer @ 2023-10-24 19:18 ` Mikulas Patocka 2023-10-24 19:32 ` Mike Snitzer 0 siblings, 1 reply; 7+ messages in thread From: Mikulas Patocka @ 2023-10-24 19:18 UTC (permalink / raw) To: Mike Snitzer; +Cc: Ming Lei, dm-devel, linux-block, axboe On Tue, 24 Oct 2023, Mike Snitzer wrote: > For the benefit of others, since this patch was posted to the old > dm-devel list, here is the original patch proposal: > https://patchwork.kernel.org/project/dm-devel/patch/15ca26cc-174a-d4e8-9780-d09f8e5a6ea5@redhat.com/ > > On Tue, Oct 03 2023 at 11:30P -0400, > Mikulas Patocka <mpatocka@redhat.com> wrote: > > > Hi > > > > Here I'm sending that patch for REQ_NOWAIT for review. > > > > It is not tested, except for some trivial tests that involve logical > > volume activation. > > At a minimum we need to test with the simple test code Jens provided > in commit a9ce385344f9 ("dm: don't attempt to queue IO under RCU protection") The question is - how do we simulate the memory allocation failures? Do we need to add some test harness that will randomly return NULL to these allocations? Or will we use the kernel fault-injection framework? > > I found out that it seems easier to propagate the error using bits in > > clone_info rather than changing return codes for each affected function. > > Yes, the various DM core code that allocates multiple bios, etc > certianly benefits from not having to worry about tracking > fine-grained return codes. So I've kept the flag in clone_info. > > > > > Mikulas > > > > > > Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> > > Please don't add Signed-off-by to anything you don't want to see going > upstream (yet), especially something you haven't tested. OK > Even going so far as adding 'Not-Signed-off-by' is appropriate. 
> > More comments inline below, and then at the end I'm attaching an > incremental patch that should get us closer to something that works > (but I haven't tested yet either). I've also messaged Ming Lei to > check with him on the FIXME I added before the call to dm_io_rewind() > -- hopefully Ming will reply to this email. > > > + if (unlikely(ci->is_nowait)) { > > + clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); > > + if (!clone) { > > + ci->nowait_failed = true; > > + return NULL; > > + } > > + } else { > > + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); > > + } > > tio = clone_to_tio(clone); > > tio->flags = 0; > > dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); > > The above is bogus because if the clone cannot be allocated then the > returned NULL dm_io struct will cause NULL pointer dereference during > error handling at the end of dm_split_and_process_bio() -- my patch > attached below addresses this. Yes, thanks for finding it. > Below you'll see I insulated dm_split_and_process_bio() from ever > checking ci.nowait_failed -- prefer methods like __send_empty_flush > that are called from dm_split_and_process_bio() return blk_status_t > (like __process_abnormal_io and __split_and_process_bio do). 
> > diff --git a/drivers/md/dm.c b/drivers/md/dm.c > index d6fbbaa7600b..2a9ff269c28b 100644 > --- a/drivers/md/dm.c > +++ b/drivers/md/dm.c > @@ -572,18 +572,16 @@ static void dm_end_io_acct(struct dm_io *io) > dm_io_acct(io, true); > } > > -static struct dm_io *alloc_io(struct clone_info *ci, struct mapped_device *md, struct bio *bio) > +static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) > { > struct dm_io *io; > struct dm_target_io *tio; > struct bio *clone; > > - if (unlikely(ci->is_nowait)) { > + if (unlikely(bio->bi_opf & REQ_NOWAIT)) { > clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); > - if (!clone) { > - ci->nowait_failed = true; > - return NULL; > - } > + if (!clone) > + return PTR_ERR(-EAGAIN); :s/PTR_ERR/ERR_PTR/ If -EAGAIN is the only possible error code, should we return NULL instead? > } else { > clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); > } > @@ -1001,6 +999,8 @@ static void dm_wq_requeue_work(struct work_struct *work) > while (io) { > struct dm_io *next = io->next; > > + // FIXME: if io->orig_bio has REQ_NOWAIT set should GFP_NOWAIT be used? > + // requeue performed from completion path so REQ_NOWAIT can be safely dropped? > dm_io_rewind(io, &md->disk->bio_split); > > io->next = NULL; > @@ -1565,7 +1565,7 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe > return ret; > } > > -static void __send_empty_flush(struct clone_info *ci) > +static blk_status_t __send_empty_flush(struct clone_info *ci) > { > struct dm_table *t = ci->map; > struct bio flush_bio; > @@ -1598,6 +1598,8 @@ static void __send_empty_flush(struct clone_info *ci) > atomic_sub(1, &ci->io->io_count); > > bio_uninit(ci->bio); > + > + return likely(!ci->nowait_failed) ? 
BLK_STS_OK : BLK_STS_AGAIN; > } > > static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, > @@ -1758,7 +1760,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) > if (unlikely(ci->is_nowait)) { > clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); > if (unlikely(!clone)) { > - ci->nowait_failed = true; > + ci->nowait_failed = true; /* unchecked, set for consistency */ > return BLK_STS_AGAIN; > } > } else { > @@ -1772,13 +1774,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) > return BLK_STS_OK; > } > > -static void init_clone_info(struct clone_info *ci, struct mapped_device *md, > +static void init_clone_info(struct clone_info *ci, struct dm_io *io, > struct dm_table *map, struct bio *bio, bool is_abnormal) > { > ci->is_nowait = !!(bio->bi_opf & REQ_NOWAIT); > ci->nowait_failed = false; > ci->map = map; > - ci->io = alloc_io(ci, md, bio); > + ci->io = io; > ci->bio = bio; > ci->is_abnormal_io = is_abnormal; > ci->submit_as_polled = false; > @@ -1813,17 +1815,17 @@ static void dm_split_and_process_bio(struct mapped_device *md, > return; > } > > - init_clone_info(&ci, md, map, bio, is_abnormal); > - if (unlikely(ci.nowait_failed)) { > - error = BLK_STS_AGAIN; > - goto out; > + io = alloc_io(md, bio); > + if (unlikely(IS_ERR(io) && ERR_PTR(io) == -EAGAIN)) { > :s/ERR_PTR/PTR_ERR/ > + /* Unable to do anything without dm_io. */ > + bio_wouldblock_error(bio); > + return; I would use "if (unlikely(IS_ERR(io)) { bio->bi_status = errno_to_blk_status(PTR_ERR(io)); bio_set_flag(bio, BIO_QUIET); bio_endio(bio); return;", so that all possible error codes (that could be added in the future) are propagated. Or, if you change alloc_io to return NULL, you can leave bio_wouldblock_error here. 
> } > - io = ci.io; > + > + init_clone_info(&ci, io, map, bio, is_abnormal); > > if (bio->bi_opf & REQ_PREFLUSH) { > - __send_empty_flush(&ci); > - if (unlikely(ci.nowait_failed)) > - error = BLK_STS_AGAIN; > + error = __send_empty_flush(&ci); > /* dm_io_complete submits any data associated with flush */ > goto out; > } > Mikulas ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: dm: Respect REQ_NOWAIT bios 2023-10-24 19:18 ` Mikulas Patocka @ 2023-10-24 19:32 ` Mike Snitzer 2023-10-25 19:34 ` [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM Mike Snitzer 0 siblings, 1 reply; 7+ messages in thread From: Mike Snitzer @ 2023-10-24 19:32 UTC (permalink / raw) To: Mikulas Patocka; +Cc: Ming Lei, dm-devel, linux-block, axboe On Tue, Oct 24 2023 at 3:18P -0400, Mikulas Patocka <mpatocka@redhat.com> wrote: > > > On Tue, 24 Oct 2023, Mike Snitzer wrote: > > > For the benefit of others, since this patch was posted to the old > > dm-devel list, here is the original patch proposal: > > https://patchwork.kernel.org/project/dm-devel/patch/15ca26cc-174a-d4e8-9780-d09f8e5a6ea5@redhat.com/ > > > > On Tue, Oct 03 2023 at 11:30P -0400, > > Mikulas Patocka <mpatocka@redhat.com> wrote: > > > > > Hi > > > > > > Here I'm sending that patch for REQ_NOWAIT for review. > > > > > > It is not tested, except for some trivial tests that involve logical > > > volume activation. > > > > At a minimum we need to test with the simple test code Jens provided > > in commit a9ce385344f9 ("dm: don't attempt to queue IO under RCU protection") > > The question is - how do we simulate the memory allocation failures? Do we > need to add some test harness that will randomly return NULL to these > allocations? Or will we use the kernel fault-injection framework? Will think about it (and do research). Maybe others have suggestions? > > Below you'll see I insulated dm_split_and_process_bio() from ever > > checking ci.nowait_failed -- prefer methods like __send_empty_flush > > that are called from dm_split_and_process_bio() return blk_status_t > > (like __process_abnormal_io and __split_and_process_bio do). 
> > > > diff --git a/drivers/md/dm.c b/drivers/md/dm.c > > index d6fbbaa7600b..2a9ff269c28b 100644 > > --- a/drivers/md/dm.c > > +++ b/drivers/md/dm.c > > @@ -572,18 +572,16 @@ static void dm_end_io_acct(struct dm_io *io) > > dm_io_acct(io, true); > > } > > > > -static struct dm_io *alloc_io(struct clone_info *ci, struct mapped_device *md, struct bio *bio) > > +static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) > > { > > struct dm_io *io; > > struct dm_target_io *tio; > > struct bio *clone; > > > > - if (unlikely(ci->is_nowait)) { > > + if (unlikely(bio->bi_opf & REQ_NOWAIT)) { > > clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); > > - if (!clone) { > > - ci->nowait_failed = true; > > - return NULL; > > - } > > + if (!clone) > > + return PTR_ERR(-EAGAIN); > > :s/PTR_ERR/ERR_PTR/ Ha, not sure how I inverted it.. but thanks. > If -EAGAIN is the only possible error code, should we return NULL instead? Yeah, thought about that. Think returning NULL is fine and if/when other reasons for failure possible we can extend as needed. > > @@ -1813,17 +1815,17 @@ static void dm_split_and_process_bio(struct mapped_device *md, > > return; > > } > > > > - init_clone_info(&ci, md, map, bio, is_abnormal); > > - if (unlikely(ci.nowait_failed)) { > > - error = BLK_STS_AGAIN; > > - goto out; > > + io = alloc_io(md, bio); > > + if (unlikely(IS_ERR(io) && ERR_PTR(io) == -EAGAIN)) { > :s/ERR_PTR/PTR_ERR/ Yes. > > + /* Unable to do anything without dm_io. */ > > + bio_wouldblock_error(bio); > > + return; > > I would use "if (unlikely(IS_ERR(io)) { > bio->bi_status = errno_to_blk_status(PTR_ERR(io)); > bio_set_flag(bio, BIO_QUIET); > bio_endio(bio); > return;", > so that all possible error codes (that could be added in the future) are > propageted. > > Or, if you change alloc_io to return NULL, you can leave > bio_wouldblock_error here. Yeah, will just return NULL and stick with bio_wouldblock_error() for now. 
Thanks, Mike ^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM 2023-10-24 19:32 ` Mike Snitzer @ 2023-10-25 19:34 ` Mike Snitzer 2023-10-25 19:55 ` Mike Snitzer ` (2 more replies) 0 siblings, 3 replies; 7+ messages in thread From: Mike Snitzer @ 2023-10-25 19:34 UTC (permalink / raw) To: Mikulas Patocka; +Cc: Ming Lei, dm-devel, linux-block, axboe From: Mikulas Patocka <mpatocka@redhat.com> Update DM core's IO submission to allocate required memory using GFP_NOWAIT if REQ_NOWAIT is set. Lone exception is in the error path where DM's requeue support needs to allocate a clone bio in dm_io_rewind() to allow the IO to be resubmitted: GFP_NOWAIT is used first but if it fails GFP_NOIO is used as a last resort. Tested with simple test provided in commit a9ce385344f916 ("dm: don't attempt to queue IO under RCU protection") that was enhanced to check error codes. Also tested using fio's pvsync2 with nowait=1. But testing with induced GFP_NOWAIT allocation failures wasn't performed. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Co-developed-by: Mike Snitzer <snitzer@kernel.org> Signed-off-by: Mike Snitzer <snitzer@kernel.org> --- drivers/md/dm-io-rewind.c | 13 ++++++-- drivers/md/dm.c | 66 ++++++++++++++++++++++++++++++++------- 2 files changed, 65 insertions(+), 14 deletions(-) diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c index 6155b0117c9d..bde5a53e2d88 100644 --- a/drivers/md/dm-io-rewind.c +++ b/drivers/md/dm-io-rewind.c @@ -143,8 +143,17 @@ static void dm_bio_rewind(struct bio *bio, unsigned int bytes) void dm_io_rewind(struct dm_io *io, struct bio_set *bs) { struct bio *orig = io->orig_bio; - struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig, - GFP_NOIO, bs); + struct bio *new_orig; + + new_orig = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOWAIT, bs); + if (unlikely(!new_orig)) { + /* + * Returning early and failing rewind isn't an option, even + * if orig has REQ_NOWAIT set, so retry alloc with GFP_NOIO. 
+ */ + new_orig = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOIO, bs); + } + /* * dm_bio_rewind can restore to previous position since the * end sector is fixed for original bio, but we still need diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1113a8da3c47..b0e1425af475 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -87,6 +87,8 @@ struct clone_info { unsigned int sector_count; bool is_abnormal_io:1; bool submit_as_polled:1; + bool is_nowait:1; + bool nowait_failed:1; }; static inline struct dm_target_io *clone_to_tio(struct bio *clone) @@ -576,7 +578,13 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); + if (unlikely(bio->bi_opf & REQ_NOWAIT)) { + clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); + if (!clone) + return NULL; + } else { + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); + } tio = clone_to_tio(clone); tio->flags = 0; dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); @@ -1500,9 +1508,17 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, mutex_unlock(&ci->io->md->table_devices_lock); if (bio_nr == num_bios) return; - + /* + * Failed to allocate all bios, all or nothing, so rewind partial + * allocations and short-circuit retry with GFP_NOIO if 'is_nowait' + */ while ((bio = bio_list_pop(blist))) free_tio(bio); + + if (ci->is_nowait) { + ci->nowait_failed = true; + return; + } } } @@ -1519,7 +1535,15 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe case 1: if (len) setup_split_accounting(ci, *len); - clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); + if (unlikely(ci->is_nowait)) { + clone = alloc_tio(ci, ti, 0, len, GFP_NOWAIT); + if (!clone) { + ci->nowait_failed = true; + return 0; + } + } else { + clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); + } __map_bio(clone); ret = 1; break; @@ -1539,7 +1563,7 @@ static 
unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe return ret; } -static void __send_empty_flush(struct clone_info *ci) +static blk_status_t __send_empty_flush(struct clone_info *ci) { struct dm_table *t = ci->map; struct bio flush_bio; @@ -1572,6 +1596,8 @@ static void __send_empty_flush(struct clone_info *ci) atomic_sub(1, &ci->io->io_count); bio_uninit(ci->bio); + + return likely(!ci->nowait_failed) ? BLK_STS_OK : BLK_STS_AGAIN; } static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, @@ -1656,7 +1682,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, __send_changing_extent_only(ci, ti, num_bios, max_granularity, max_sectors); - return BLK_STS_OK; + return likely(!ci->nowait_failed) ? BLK_STS_OK : BLK_STS_AGAIN; } /* @@ -1714,7 +1740,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) if (unlikely(!ti)) return BLK_STS_IOERR; - if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && + if (unlikely(ci->is_nowait) && unlikely(!dm_target_supports_nowait(ti->type))) return BLK_STS_NOTSUPP; @@ -1729,7 +1755,15 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); setup_split_accounting(ci, len); - clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + if (unlikely(ci->is_nowait)) { + clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); + if (unlikely(!clone)) { + ci->nowait_failed = true; /* unchecked, set for consistency */ + return BLK_STS_AGAIN; + } + } else { + clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + } __map_bio(clone); ci->sector += len; @@ -1738,11 +1772,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) return BLK_STS_OK; } -static void init_clone_info(struct clone_info *ci, struct mapped_device *md, +static void init_clone_info(struct clone_info *ci, struct dm_io *io, struct dm_table *map, struct bio *bio, bool is_abnormal) { + ci->is_nowait = !!(bio->bi_opf & 
REQ_NOWAIT); + ci->nowait_failed = false; ci->map = map; - ci->io = alloc_io(md, bio); + ci->io = io; ci->bio = bio; ci->is_abnormal_io = is_abnormal; ci->submit_as_polled = false; @@ -1777,11 +1813,17 @@ static void dm_split_and_process_bio(struct mapped_device *md, return; } - init_clone_info(&ci, md, map, bio, is_abnormal); - io = ci.io; + io = alloc_io(md, bio); + if (!io) { + /* Unable to do anything without dm_io. */ + bio_wouldblock_error(bio); + return; + } + + init_clone_info(&ci, io, map, bio, is_abnormal); if (bio->bi_opf & REQ_PREFLUSH) { - __send_empty_flush(&ci); + error = __send_empty_flush(&ci); /* dm_io_complete submits any data associated with flush */ goto out; } -- 2.40.0 ^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM 2023-10-25 19:34 ` [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM Mike Snitzer @ 2023-10-25 19:55 ` Mike Snitzer 2023-10-25 23:44 ` Ming Lei 2023-10-26 0:12 ` [PATCH v3] dm: respect REQ_NOWAIT flag in normal " Mike Snitzer 2 siblings, 0 replies; 7+ messages in thread From: Mike Snitzer @ 2023-10-25 19:55 UTC (permalink / raw) To: Mikulas Patocka; +Cc: Ming Lei, dm-devel, linux-block, axboe On Wed, Oct 25 2023 at 3:34P -0400, Mike Snitzer <snitzer@kernel.org> wrote: > From: Mikulas Patocka <mpatocka@redhat.com> > > Update DM core's IO submission to allocate required memory using > GFP_NOWAIT if REQ_NOWAIT is set. Lone exception is in the error path > where DM's requeue support needs to allocate a clone bio in > dm_io_rewind() to allow the IO to be resubmitted: GFP_NOWAIT is used > first but if it fails GFP_NOIO is used as a last resort. > > Tested with simple test provided in commit a9ce385344f916 ("dm: don't > attempt to queue IO under RCU protection") that was enhanced to check > error codes. Also tested using fio's pvsync2 with nowait=1. > > But testing with induced GFP_NOWAIT allocation failures wasn't > performed. > > Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> > Co-developed-by: Mike Snitzer <snitzer@kernel.org> > Signed-off-by: Mike Snitzer <snitzer@kernel.org> > --- > drivers/md/dm-io-rewind.c | 13 ++++++-- > drivers/md/dm.c | 66 ++++++++++++++++++++++++++++++++------- > 2 files changed, 65 insertions(+), 14 deletions(-) Backfilling some comments on this v2: 1) I'm not completely clear if REQ_NOWAIT not ever sleeping (waiting on IO) is a hint vs a hard requirement -- so the dm_io_rewind() change to try GFP_NOWAIT and failback to GFP_NOIO might be bogus... 2) What is a good way to reliably simulate low memory allocations such that mempool backed GFP_NOWAIT allocations fail? Thanks for any review/feedback. 
Mike > > diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c > index 6155b0117c9d..bde5a53e2d88 100644 > --- a/drivers/md/dm-io-rewind.c > +++ b/drivers/md/dm-io-rewind.c > @@ -143,8 +143,17 @@ static void dm_bio_rewind(struct bio *bio, unsigned int bytes) > void dm_io_rewind(struct dm_io *io, struct bio_set *bs) > { > struct bio *orig = io->orig_bio; > - struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig, > - GFP_NOIO, bs); > + struct bio *new_orig; > + > + new_orig = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOWAIT, bs); > + if (unlikely(!new_orig)) { > + /* > + * Returning early and failing rewind isn't an option, even > + * if orig has REQ_NOWAIT set, so retry alloc with GFP_NOIO. > + */ > + new_orig = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOIO, bs); > + } > + > /* > * dm_bio_rewind can restore to previous position since the > * end sector is fixed for original bio, but we still need > diff --git a/drivers/md/dm.c b/drivers/md/dm.c > index 1113a8da3c47..b0e1425af475 100644 > --- a/drivers/md/dm.c > +++ b/drivers/md/dm.c > @@ -87,6 +87,8 @@ struct clone_info { > unsigned int sector_count; > bool is_abnormal_io:1; > bool submit_as_polled:1; > + bool is_nowait:1; > + bool nowait_failed:1; > }; > > static inline struct dm_target_io *clone_to_tio(struct bio *clone) > @@ -576,7 +578,13 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) > struct dm_target_io *tio; > struct bio *clone; > > - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); > + if (unlikely(bio->bi_opf & REQ_NOWAIT)) { > + clone = bio_alloc_clone(NULL, bio, GFP_NOWAIT, &md->mempools->io_bs); > + if (!clone) > + return NULL; > + } else { > + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); > + } > tio = clone_to_tio(clone); > tio->flags = 0; > dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); > @@ -1500,9 +1508,17 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, > 
mutex_unlock(&ci->io->md->table_devices_lock); > if (bio_nr == num_bios) > return; > - > + /* > + * Failed to allocate all bios, all or nothing, so rewind partial > + * allocations and short-circuit retry with GFP_NOIO if 'is_nowait' > + */ > while ((bio = bio_list_pop(blist))) > free_tio(bio); > + > + if (ci->is_nowait) { > + ci->nowait_failed = true; > + return; > + } > } > } > > @@ -1519,7 +1535,15 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe > case 1: > if (len) > setup_split_accounting(ci, *len); > - clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); > + if (unlikely(ci->is_nowait)) { > + clone = alloc_tio(ci, ti, 0, len, GFP_NOWAIT); > + if (!clone) { > + ci->nowait_failed = true; > + return 0; > + } > + } else { > + clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); > + } > __map_bio(clone); > ret = 1; > break; > @@ -1539,7 +1563,7 @@ static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_targe > return ret; > } > > -static void __send_empty_flush(struct clone_info *ci) > +static blk_status_t __send_empty_flush(struct clone_info *ci) > { > struct dm_table *t = ci->map; > struct bio flush_bio; > @@ -1572,6 +1596,8 @@ static void __send_empty_flush(struct clone_info *ci) > atomic_sub(1, &ci->io->io_count); > > bio_uninit(ci->bio); > + > + return likely(!ci->nowait_failed) ? BLK_STS_OK : BLK_STS_AGAIN; > } > > static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, > @@ -1656,7 +1682,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci, > > __send_changing_extent_only(ci, ti, num_bios, > max_granularity, max_sectors); > - return BLK_STS_OK; > + return likely(!ci->nowait_failed) ? 
BLK_STS_OK : BLK_STS_AGAIN; > } > > /* > @@ -1714,7 +1740,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) > if (unlikely(!ti)) > return BLK_STS_IOERR; > > - if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && > + if (unlikely(ci->is_nowait) && > unlikely(!dm_target_supports_nowait(ti->type))) > return BLK_STS_NOTSUPP; > > @@ -1729,7 +1755,15 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) > > len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); > setup_split_accounting(ci, len); > - clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); > + if (unlikely(ci->is_nowait)) { > + clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); > + if (unlikely(!clone)) { > + ci->nowait_failed = true; /* unchecked, set for consistency */ > + return BLK_STS_AGAIN; > + } > + } else { > + clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); > + } > __map_bio(clone); > > ci->sector += len; > @@ -1738,11 +1772,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) > return BLK_STS_OK; > } > > -static void init_clone_info(struct clone_info *ci, struct mapped_device *md, > +static void init_clone_info(struct clone_info *ci, struct dm_io *io, > struct dm_table *map, struct bio *bio, bool is_abnormal) > { > + ci->is_nowait = !!(bio->bi_opf & REQ_NOWAIT); > + ci->nowait_failed = false; > ci->map = map; > - ci->io = alloc_io(md, bio); > + ci->io = io; > ci->bio = bio; > ci->is_abnormal_io = is_abnormal; > ci->submit_as_polled = false; > @@ -1777,11 +1813,17 @@ static void dm_split_and_process_bio(struct mapped_device *md, > return; > } > > - init_clone_info(&ci, md, map, bio, is_abnormal); > - io = ci.io; > + io = alloc_io(md, bio); > + if (!io) { > + /* Unable to do anything without dm_io. 
*/ > + bio_wouldblock_error(bio); > + return; > + } > + > + init_clone_info(&ci, io, map, bio, is_abnormal); > > if (bio->bi_opf & REQ_PREFLUSH) { > - __send_empty_flush(&ci); > + error = __send_empty_flush(&ci); > /* dm_io_complete submits any data associated with flush */ > goto out; > } > -- > 2.40.0 > ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM 2023-10-25 19:34 ` [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM Mike Snitzer 2023-10-25 19:55 ` Mike Snitzer @ 2023-10-25 23:44 ` Ming Lei 2023-10-26 0:12 ` [PATCH v3] dm: respect REQ_NOWAIT flag in normal " Mike Snitzer 2 siblings, 0 replies; 7+ messages in thread From: Ming Lei @ 2023-10-25 23:44 UTC (permalink / raw) To: Mike Snitzer; +Cc: Mikulas Patocka, dm-devel, linux-block, axboe, ming.lei On Wed, Oct 25, 2023 at 03:34:40PM -0400, Mike Snitzer wrote: > From: Mikulas Patocka <mpatocka@redhat.com> > > Update DM core's IO submission to allocate required memory using > GFP_NOWAIT if REQ_NOWAIT is set. Lone exception is in the error path > where DM's requeue support needs to allocate a clone bio in > dm_io_rewind() to allow the IO to be resubmitted: GFP_NOWAIT is used > first but if it fails GFP_NOIO is used as a last resort. > > Tested with simple test provided in commit a9ce385344f916 ("dm: don't > attempt to queue IO under RCU protection") that was enhanced to check > error codes. Also tested using fio's pvsync2 with nowait=1. > > But testing with induced GFP_NOWAIT allocation failures wasn't > performed. 
> > Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> > Co-developed-by: Mike Snitzer <snitzer@kernel.org> > Signed-off-by: Mike Snitzer <snitzer@kernel.org> > --- > drivers/md/dm-io-rewind.c | 13 ++++++-- > drivers/md/dm.c | 66 ++++++++++++++++++++++++++++++++------- > 2 files changed, 65 insertions(+), 14 deletions(-) > > diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c > index 6155b0117c9d..bde5a53e2d88 100644 > --- a/drivers/md/dm-io-rewind.c > +++ b/drivers/md/dm-io-rewind.c > @@ -143,8 +143,17 @@ static void dm_bio_rewind(struct bio *bio, unsigned int bytes) > void dm_io_rewind(struct dm_io *io, struct bio_set *bs) > { > struct bio *orig = io->orig_bio; > - struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig, > - GFP_NOIO, bs); > + struct bio *new_orig; > + > + new_orig = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOWAIT, bs); > + if (unlikely(!new_orig)) { > + /* > + * Returning early and failing rewind isn't an option, even > + * if orig has REQ_NOWAIT set, so retry alloc with GFP_NOIO. > + */ > + new_orig = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOIO, bs); > + } dm_bio_rewind() is only called in requeue work fn, so there shouldn't be issue with NOWAIT, cause upper IO submission code path won't be blocked by requeue. Thanks, Ming ^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH v3] dm: respect REQ_NOWAIT flag in normal bios issued to DM 2023-10-25 19:34 ` [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM Mike Snitzer 2023-10-25 19:55 ` Mike Snitzer 2023-10-25 23:44 ` Ming Lei @ 2023-10-26 0:12 ` Mike Snitzer 2 siblings, 0 replies; 7+ messages in thread From: Mike Snitzer @ 2023-10-26 0:12 UTC (permalink / raw) To: Mikulas Patocka; +Cc: Ming Lei, dm-devel, linux-block, axboe Update DM core's normal IO submission to allocate required memory using GFP_NOWAIT if REQ_NOWAIT is set. Tested with simple test provided in commit a9ce385344f916 ("dm: don't attempt to queue IO under RCU protection") that was enhanced to check error codes. Also tested using fio's pvsync2 with nowait=1. But testing with induced GFP_NOWAIT allocation failures wasn't performed (yet). Signed-off-by: Mike Snitzer <snitzer@kernel.org> --- drivers/md/dm.c | 50 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 13 deletions(-) v3: - followed Mikulas's suggestion of only supporting NOWAIT for normal IO - changed attribution to me since basically all the code was rewritten - removed dm_io_rewind() changes thanks to Ming's reminder that DM's requeue won't block IO submission because it is called from worker (so it using GFP_NOIO is fine). 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1113a8da3c47..609c68287158 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -570,13 +570,15 @@ static void dm_end_io_acct(struct dm_io *io) dm_io_acct(io, true); } -static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) +static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) { struct dm_io *io; struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); + clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs); + if (unlikely(!clone)) + return NULL; tio = clone_to_tio(clone); tio->flags = 0; dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO); @@ -1714,10 +1716,6 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) if (unlikely(!ti)) return BLK_STS_IOERR; - if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && - unlikely(!dm_target_supports_nowait(ti->type))) - return BLK_STS_NOTSUPP; - if (unlikely(ci->is_abnormal_io)) return __process_abnormal_io(ci, ti); @@ -1729,7 +1727,17 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); setup_split_accounting(ci, len); - clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + + if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) { + if (unlikely(!dm_target_supports_nowait(ti->type))) + return BLK_STS_NOTSUPP; + + clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT); + if (unlikely(!clone)) + return BLK_STS_AGAIN; + } else { + clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO); + } __map_bio(clone); ci->sector += len; @@ -1738,11 +1746,11 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) return BLK_STS_OK; } -static void init_clone_info(struct clone_info *ci, struct mapped_device *md, +static void init_clone_info(struct clone_info *ci, struct dm_io *io, struct dm_table *map, struct bio *bio, bool is_abnormal) { ci->map = map; - ci->io = alloc_io(md, bio); + ci->io = 
io; ci->bio = bio; ci->is_abnormal_io = is_abnormal; ci->submit_as_polled = false; @@ -1764,7 +1772,7 @@ static void dm_split_and_process_bio(struct mapped_device *md, struct clone_info ci; struct dm_io *io; blk_status_t error = BLK_STS_OK; - bool is_abnormal; + bool is_abnormal, is_preflush = !!(bio->bi_opf & REQ_PREFLUSH); is_abnormal = is_abnormal_io(bio); if (unlikely(is_abnormal)) { @@ -1777,10 +1785,26 @@ static void dm_split_and_process_bio(struct mapped_device *md, return; } - init_clone_info(&ci, md, map, bio, is_abnormal); - io = ci.io; + if (unlikely(bio->bi_opf & REQ_NOWAIT)) { + /* Only support nowait for normal IO */ + if (unlikely(is_preflush || is_abnormal)) { + bio->bi_status = BLK_STS_NOTSUPP; + bio_endio(bio); + return; + } - if (bio->bi_opf & REQ_PREFLUSH) { + io = alloc_io(md, bio, GFP_NOWAIT); + if (unlikely(!io)) { + /* Unable to do anything without dm_io. */ + bio_wouldblock_error(bio); + return; + } + } else { + io = alloc_io(md, bio, GFP_NOIO); + } + init_clone_info(&ci, io, map, bio, is_abnormal); + + if (is_preflush) { __send_empty_flush(&ci); /* dm_io_complete submits any data associated with flush */ goto out; -- 2.40.0 ^ permalink raw reply related [flat|nested] 7+ messages in thread
end of thread, other threads:[~2023-10-26 0:13 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <15ca26cc-174a-d4e8-9780-d09f8e5a6ea5@redhat.com>
2023-10-24 17:58 ` dm: Respect REQ_NOWAIT bios Mike Snitzer
2023-10-24 19:18 ` Mikulas Patocka
2023-10-24 19:32 ` Mike Snitzer
2023-10-25 19:34 ` [PATCH v2] dm: respect REQ_NOWAIT flag in bios issued to DM Mike Snitzer
2023-10-25 19:55 ` Mike Snitzer
2023-10-25 23:44 ` Ming Lei
2023-10-26 0:12 ` [PATCH v3] dm: respect REQ_NOWAIT flag in normal " Mike Snitzer
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).