* [PATCH 7.2 0/2] ublk: optimize ublk_rq_has_data()
@ 2026-05-13 21:18 Caleb Sander Mateos
2026-05-13 21:18 ` [PATCH 7.2 1/2] blk-mq: introduce blk_rq_has_data() Caleb Sander Mateos
2026-05-13 21:18 ` [PATCH 7.2 2/2] ublk: optimize ublk_rq_has_data() Caleb Sander Mateos
0 siblings, 2 replies; 3+ messages in thread
From: Caleb Sander Mateos @ 2026-05-13 21:18 UTC (permalink / raw)
To: Ming Lei, Jens Axboe; +Cc: linux-block, linux-kernel, Caleb Sander Mateos
ublk_rq_has_data() currently uses bio_has_data(), which involves 2
indirections and several branches. Introduce a blk_rq_has_data()
analogue for struct request and use it instead to save an indirection
and NULL check.
Caleb Sander Mateos (2):
blk-mq: introduce blk_rq_has_data()
ublk: optimize ublk_rq_has_data()
drivers/block/ublk_drv.c | 21 ++++++++-------------
include/linux/blk-mq.h | 9 +++++++++
2 files changed, 17 insertions(+), 13 deletions(-)
--
2.54.0
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH 7.2 1/2] blk-mq: introduce blk_rq_has_data()
2026-05-13 21:18 [PATCH 7.2 0/2] ublk: optimize ublk_rq_has_data() Caleb Sander Mateos
@ 2026-05-13 21:18 ` Caleb Sander Mateos
2026-05-13 21:18 ` [PATCH 7.2 2/2] ublk: optimize ublk_rq_has_data() Caleb Sander Mateos
1 sibling, 0 replies; 3+ messages in thread
From: Caleb Sander Mateos @ 2026-05-13 21:18 UTC (permalink / raw)
To: Ming Lei, Jens Axboe; +Cc: linux-block, linux-kernel, Caleb Sander Mateos
Add blk_rq_has_data(), an analogue of bio_has_data() for struct request.
This skips one dereference relative to bio_has_data(rq->bio).
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
---
include/linux/blk-mq.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 18a2388ba581..4349aefdbc87 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1102,10 +1102,11 @@ struct req_iterator {
bio_iter_last(bvec, _iter.iter))
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_has_data() : whether the request carries data
* blk_rq_cur_bytes() : bytes left in the current segment
* blk_rq_sectors() : sectors left in the entire request
* blk_rq_cur_sectors() : sectors left in the current segment
* blk_rq_stats_sectors() : sectors of the entire request used for stats
*/
@@ -1117,10 +1118,18 @@ static inline sector_t blk_rq_pos(const struct request *rq)
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->__data_len;
}
+static inline bool blk_rq_has_data(const struct request *rq)
+{
+ return blk_rq_bytes(rq) &&
+ req_op(rq) != REQ_OP_DISCARD &&
+ req_op(rq) != REQ_OP_SECURE_ERASE &&
+ req_op(rq) != REQ_OP_WRITE_ZEROES;
+}
+
static inline int blk_rq_cur_bytes(const struct request *rq)
{
if (!rq->bio)
return 0;
if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
--
2.54.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH 7.2 2/2] ublk: optimize ublk_rq_has_data()
2026-05-13 21:18 [PATCH 7.2 0/2] ublk: optimize ublk_rq_has_data() Caleb Sander Mateos
2026-05-13 21:18 ` [PATCH 7.2 1/2] blk-mq: introduce blk_rq_has_data() Caleb Sander Mateos
@ 2026-05-13 21:18 ` Caleb Sander Mateos
1 sibling, 0 replies; 3+ messages in thread
From: Caleb Sander Mateos @ 2026-05-13 21:18 UTC (permalink / raw)
To: Ming Lei, Jens Axboe; +Cc: linux-block, linux-kernel, Caleb Sander Mateos
ublk_rq_has_data() currently uses bio_has_data(), which involves 2
indirections and several branches. Use blk_rq_has_data() instead to save
an indirection and NULL check.
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
---
drivers/block/ublk_drv.c | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 8e5f3738c203..4d7efc12247c 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1170,15 +1170,10 @@ static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
int qid)
{
return dev->queues[qid];
}
-static inline bool ublk_rq_has_data(const struct request *rq)
-{
- return bio_has_data(rq->bio);
-}
-
static inline struct ublksrv_io_desc *
ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
@@ -1387,16 +1382,16 @@ static size_t ublk_copy_user_integrity(const struct request *req,
}
#endif /* #ifdef CONFIG_BLK_DEV_INTEGRITY */
static inline bool ublk_need_map_req(const struct request *req)
{
- return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
+ return blk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
}
static inline bool ublk_need_unmap_req(const struct request *req)
{
- return ublk_rq_has_data(req) &&
+ return blk_rq_has_data(req) &&
(req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
}
static unsigned int ublk_map_io(const struct ublk_queue *ubq,
const struct request *req,
@@ -1506,11 +1501,11 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
/* Try shmem zero-copy match before setting addr */
- if (ublk_support_shmem_zc(ubq) && ublk_rq_has_data(req)) {
+ if (ublk_support_shmem_zc(ubq) && blk_rq_has_data(req)) {
u32 buf_idx, buf_off;
if (ublk_try_buf_match(ubq->dev, req,
&buf_idx, &buf_off)) {
iod->op_flags |= UBLK_IO_F_SHMEM_ZC;
@@ -1796,11 +1791,11 @@ static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req)
}
if (!ublk_start_io(ubq, req, io))
return;
- if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
+ if (ublk_support_auto_buf_reg(ubq) && blk_rq_has_data(req)) {
ublk_auto_buf_dispatch(ubq, req, io, io->cmd, issue_flags);
} else {
ublk_init_req_ref(ubq, io);
ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
}
@@ -1817,11 +1812,11 @@ static bool __ublk_batch_prep_dispatch(struct ublk_queue *ubq,
struct io_uring_cmd *cmd = data->cmd;
if (!ublk_start_io(ubq, req, io))
return false;
- if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
+ if (ublk_support_auto_buf_reg(ubq) && blk_rq_has_data(req)) {
res = ublk_auto_buf_register(ubq, req, io, cmd,
data->issue_flags);
if (res == AUTO_BUF_REG_FAIL)
return false;
@@ -3198,11 +3193,11 @@ ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
new_registered_buffers = io->task_registered_buffers + 1;
if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
issue_flags);
- if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
+ if (!ublk_dev_support_zero_copy(ub) || !blk_rq_has_data(req))
return -EINVAL;
ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
issue_flags);
if (ret)
@@ -3481,11 +3476,11 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
return NULL;
if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
goto fail_put;
- if (!ublk_rq_has_data(req))
+ if (!blk_rq_has_data(req))
goto fail_put;
return req;
fail_put:
ublk_put_req_ref(io, req);
@@ -4054,11 +4049,11 @@ ublk_user_copy(struct kiocb *iocb, struct iov_iter *iter, int dir)
/* On daemon, io can't be completed concurrently, so skip ref */
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
return -EINVAL;
req = io->req;
- if (!ublk_rq_has_data(req))
+ if (!blk_rq_has_data(req))
return -EINVAL;
} else {
req = __ublk_check_and_get_req(ub, q_id, tag, io);
if (!req)
return -EINVAL;
--
2.54.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
end of thread, other threads:[~2026-05-13 21:19 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-05-13 21:18 [PATCH 7.2 0/2] ublk: optimize ublk_rq_has_data() Caleb Sander Mateos
2026-05-13 21:18 ` [PATCH 7.2 1/2] blk-mq: introduce blk_rq_has_data() Caleb Sander Mateos
2026-05-13 21:18 ` [PATCH 7.2 2/2] ublk: optimize ublk_rq_has_data() Caleb Sander Mateos
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox