From: Suwan Kim <suwan.kim027@gmail.com>
To: mst@redhat.com, jasowang@redhat.com, stefanha@redhat.com,
pbonzini@redhat.com, mgurtovoy@nvidia.com,
dongli.zhang@oracle.com
Cc: virtualization@lists.linux-foundation.org,
linux-block@vger.kernel.org, Suwan Kim <suwan.kim027@gmail.com>
Subject: [PATCH v4 2/2] virtio-blk: support mq_ops->queue_rqs()
Date: Tue, 5 Apr 2022 14:31:22 +0900 [thread overview]
Message-ID: <20220405053122.77626-3-suwan.kim027@gmail.com> (raw)
In-Reply-To: <20220405053122.77626-1-suwan.kim027@gmail.com>
This patch supports the mq_ops->queue_rqs() hook. It has the advantage of
enabling batch submission to the virtio-blk driver. It also helps polling
I/O because polling uses the batched completion of the block layer. Batch
submission in queue_rqs() can boost polling performance.
In queue_rqs(), it iterates over plug->mq_list and collects requests that
belong to the same HW queue until it encounters a request from another
HW queue or reaches the end of the list.
Then, virtio-blk adds requests into virtqueue and kicks virtqueue
to submit requests.
If there is an error, it inserts the failed request into requeue_list and
passes it to the ordinary block layer path.
For verification, I did fio test.
(io_uring, randread, direct=1, bs=4K, iodepth=64 numjobs=N)
I set 4 vCPUs and 2 virtio-blk queues for the VM and ran the fio test 5 times.
It shows about 2% improvement.
| numjobs=2 | numjobs=4
-----------------------------------------------------------
fio without queue_rqs() | 291K IOPS | 238K IOPS
-----------------------------------------------------------
fio with queue_rqs() | 295K IOPS | 243K IOPS
For polling I/O performance, I also did fio test as below.
(io_uring, hipri, randread, direct=1, bs=512, iodepth=64 numjobs=4)
I set 4 vCPUs and 2 poll queues for the VM.
It shows about 2% improvement in polling I/O.
| IOPS | avg latency
-----------------------------------------------------------
fio poll without queue_rqs() | 424K | 613.05 usec
-----------------------------------------------------------
fio poll with queue_rqs() | 435K | 601.01 usec
Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
---
drivers/block/virtio_blk.c | 110 +++++++++++++++++++++++++++++++++----
1 file changed, 99 insertions(+), 11 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 712579dcd3cc..a091034bc551 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -92,6 +92,7 @@ struct virtio_blk {
struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
u8 status;
+ int sg_num;
struct sg_table sg_table;
struct scatterlist sg[];
};
@@ -311,18 +312,13 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
virtqueue_notify(vq->vq);
}
-static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+ struct virtio_blk *vblk,
+ struct request *req,
+ struct virtblk_req *vbr)
{
- struct virtio_blk *vblk = hctx->queue->queuedata;
- struct request *req = bd->rq;
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- unsigned long flags;
- int num;
- int qid = hctx->queue_num;
- bool notify = false;
blk_status_t status;
- int err;
+ int num;
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
@@ -335,9 +331,30 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
+ vbr->sg_num = num;
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct request *req = bd->rq;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ unsigned long flags;
+ int qid = hctx->queue_num;
+ bool notify = false;
+ blk_status_t status;
+ int err;
+
+ status = virtblk_prep_rq(hctx, vblk, req, vbr);
+ if (unlikely(status))
+ return status;
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr,
+ vbr->sg_table.sgl, vbr->sg_num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -367,6 +384,76 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+static bool virtblk_prep_rq_batch(struct request *req)
+{
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+
+ return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct request **rqlist,
+ struct request **requeue_list)
+{
+ unsigned long flags;
+ int err;
+ bool kick;
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ err = virtblk_add_req(vq->vq, vbr,
+ vbr->sg_table.sgl, vbr->sg_num);
+ if (err) {
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
+ rq_list_add(requeue_list, req);
+ }
+ }
+
+ kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ bool kick;
+
+ if (!virtblk_prep_rq_batch(req)) {
+ rq_list_move(rqlist, &requeue_list, req, prev);
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ req->rq_next = NULL;
+ kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ if (kick)
+ virtqueue_notify(vq->vq);
+
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -834,6 +921,7 @@ static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
+ .queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
.init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
--
2.26.3
next prev parent reply other threads:[~2022-04-05 5:32 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-05 5:31 [PATCH v4 0/2] virtio-blk: support polling I/O and mq_ops->queue_rqs() Suwan Kim
2022-04-05 5:31 ` [PATCH v4 1/2] virtio-blk: support polling I/O Suwan Kim
2022-04-05 7:26 ` Stefan Hajnoczi
2022-04-05 10:08 ` Suwan Kim
2022-04-05 8:51 ` Christoph Hellwig
2022-04-05 10:30 ` Suwan Kim
2022-04-05 14:35 ` Suwan Kim
2022-04-05 5:31 ` Suwan Kim [this message]
2022-04-05 8:57 ` [PATCH v4 2/2] virtio-blk: support mq_ops->queue_rqs() Christoph Hellwig
2022-04-05 10:56 ` Suwan Kim
2022-04-05 9:09 ` Stefan Hajnoczi
-- strict thread matches above, loose matches on Subject: below --
2022-04-04 9:28 [PATCH v4 0/2] virtio-blk: support polling I/O and mq_ops->queue_rqs() Suwan Kim
2022-04-04 9:28 ` [PATCH v4 2/2] virtio-blk: support mq_ops->queue_rqs() Suwan Kim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220405053122.77626-3-suwan.kim027@gmail.com \
--to=suwan.kim027@gmail.com \
--cc=dongli.zhang@oracle.com \
--cc=jasowang@redhat.com \
--cc=linux-block@vger.kernel.org \
--cc=mgurtovoy@nvidia.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=stefanha@redhat.com \
--cc=virtualization@lists.linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox