Subject: [PATCH 1/2] blk-mq: allow REQ_NOWAIT to return an error inline
From: Jens Axboe @ 2019-07-17 15:04 UTC
To: linux-block; +Cc: Filipp.Mikoian, Jens Axboe
By default, if a caller sets REQ_NOWAIT and we need to block, we'll
return -EAGAIN through the bio->bi_end_io() callback. For some use
cases, that asynchronous error delivery makes the flag hard to use.
Allow a caller to ask for inline return of errors related to
blocking by also setting REQ_NOWAIT_INLINE.
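
A rough sketch of the intended calling convention (this mirrors how
patch 2 in this series uses it; the surrounding variables and error
label are illustrative, not part of this change):

    bio->bi_opf |= REQ_NOWAIT | REQ_NOWAIT_INLINE;

    qc = submit_bio(bio);
    if (qc == BLK_QC_T_EAGAIN) {
            /* getting a request would have blocked; fail inline */
            ret = -EAGAIN;
            goto err;
    }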
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
block/blk-mq.c | 9 +++++++--
include/linux/blk_types.h | 5 ++++-
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b038ec680e84..ac827e7e3bbe 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1960,9 +1960,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
- if (bio->bi_opf & REQ_NOWAIT)
+
+ if (bio->bi_opf & REQ_NOWAIT_INLINE) {
+ cookie = BLK_QC_T_EAGAIN;
+ } else if (bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
- return BLK_QC_T_NONE;
+ cookie = BLK_QC_T_NONE;
+ }
+ return cookie;
}
trace_block_getrq(q, bio, bio->bi_opf);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index feff3fe4467e..1b1fa1557e68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,6 +311,7 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
+#define BLK_QC_T_EAGAIN -2U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
- return cookie != BLK_QC_T_NONE;
+ return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
--
2.17.1
Subject: [PATCH 2/2] block: properly handle IOCB_NOWAIT for async O_DIRECT IO
From: Jens Axboe @ 2019-07-17 15:04 UTC
To: linux-block; +Cc: Filipp.Mikoian, Jens Axboe
A caller is supposed to pass in IOCB_NOWAIT if we can't block for any
given operation, but O_DIRECT for block devices just ignores this. Hence
we'll block on various resource shortages on the block layer side, such
as having to wait for a free request.
Use the new REQ_NOWAIT_INLINE to ask for this error to be returned
inline, so we can handle it appropriately and return -EAGAIN to the
caller.
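
For illustration only (not part of this patch), a userspace caller can
reach this path with RWF_NOWAIT, assuming a glibc that exposes
pwritev2()/RWF_NOWAIT and that the flag is mapped to IOCB_NOWAIT on
this path; the device path and sizes below are made up:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdlib.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Sketch: one O_DIRECT write that returns EAGAIN instead of blocking. */
    static ssize_t nowait_write(const char *dev)
    {
            void *buf = NULL;
            ssize_t ret = -1;
            int fd = open(dev, O_RDWR | O_DIRECT);

            if (fd < 0)
                    return -1;
            if (posix_memalign(&buf, 4096, 4096) == 0) {
                    /* O_DIRECT needs aligned buffer, length, and offset */
                    struct iovec iov = { .iov_base = buf, .iov_len = 4096 };

                    ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
                    if (ret < 0 && errno == EAGAIN) {
                            /* would have blocked in the block layer; retry later */
                    }
                    free(buf);
            }
            close(fd);
            return ret;
    }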
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
fs/block_dev.c | 41 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 38 insertions(+), 3 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index f00b569a9f89..7c4da2e2e5be 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -344,15 +344,24 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
+ gfp_t gfp;
int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+ if (nowait)
+ gfp = GFP_NOWAIT;
+ else
+ gfp = GFP_KERNEL;
+
+ bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
+ if (!bio)
+ return -EAGAIN;
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -398,6 +407,14 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
task_io_account_write(bio->bi_iter.bi_size);
}
+ /*
+ * Tell underlying layer to not block for resource shortage.
+ * And if we would have blocked, return error inline instead
+ * of through the bio->bi_end_io() callback.
+ */
+ if (nowait)
+ bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
+
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
@@ -411,6 +428,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ ret = -EAGAIN;
+ goto err;
+ }
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -431,8 +452,17 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
atomic_inc(&dio->ref);
}
- submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
+ qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ bio = bio_alloc(gfp, nr_pages);
+ if (!bio) {
+ ret = -EAGAIN;
+ goto err;
+ }
}
if (!is_poll)
@@ -452,6 +482,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
__set_current_state(TASK_RUNNING);
+out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
if (likely(!ret))
@@ -459,6 +490,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio_put(&dio->bio);
return ret;
+err:
+ if (!is_poll)
+ blk_finish_plug(&plug);
+ goto out;
}
static ssize_t
--
2.17.1