From: Suwan Kim <suwan.kim027@gmail.com>
To: Stefan Hajnoczi <stefanha@redhat.com>
Cc: mst@redhat.com, jasowang@redhat.com, pbonzini@redhat.com,
mgurtovoy@nvidia.com, virtualization@lists.linux-foundation.org,
linux-block@vger.kernel.org
Subject: Re: [PATCH v3 2/2] virtio-blk: support mq_ops->queue_rqs()
Date: Tue, 29 Mar 2022 00:50:33 +0900 [thread overview]
Message-ID: <YkHZSV+USBSRPuTv@localhost.localdomain> (raw)
In-Reply-To: <YkG1HeQ8qu11KFnF@stefanha-x1.localdomain>
On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > @@ -367,6 +381,66 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> >  	return BLK_STS_OK;
> >  }
> >  
> > +static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
> > +{
> > +	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
> > +	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
> > +	unsigned long flags;
> > +	int num, err;
> > +
> > +	req->mq_hctx->tags->rqs[req->tag] = req;
> > +
> > +	if (virtblk_prep_rq(req->mq_hctx, vblk, req, vbr, &num) != BLK_STS_OK)
> > +		return false;
> > +
> > +	spin_lock_irqsave(&vq->lock, flags);
> > +	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
> > +	if (err) {
> > +		spin_unlock_irqrestore(&vq->lock, flags);
> > +		virtblk_unmap_data(req, vbr);
> > +		virtblk_cleanup_cmd(req);
> > +		return false;
> > +	}
> > +	spin_unlock_irqrestore(&vq->lock, flags);
>
> Simplification:
>
> 	spin_lock_irqsave(&vq->lock, flags);
> 	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
> 	spin_unlock_irqrestore(&vq->lock, flags);
> 	if (err) {
> 		virtblk_unmap_data(req, vbr);
> 		virtblk_cleanup_cmd(req);
> 		return false;
> 	}
>
Thanks! I will fix it.
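For reference, a minimal sketch of how virtblk_prep_rq_batch() would
look with that simplification folded in (same v3 helpers and
signatures assumed, untested):
-----------
static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
{
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int num, err;

	req->mq_hctx->tags->rqs[req->tag] = req;

	if (virtblk_prep_rq(req->mq_hctx, vblk, req, vbr, &num) != BLK_STS_OK)
		return false;

	/* unlock right after the add so the error cleanup runs unlocked */
	spin_lock_irqsave(&vq->lock, flags);
	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
	spin_unlock_irqrestore(&vq->lock, flags);
	if (err) {
		virtblk_unmap_data(req, vbr);
		virtblk_cleanup_cmd(req);
		return false;
	}

	return true;
}
-----------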
> > +
> > +	return true;
> > +}
> > +
> > +static void virtio_queue_rqs(struct request **rqlist)
> > +{
> > +	struct request *req, *next, *prev = NULL;
> > +	struct request *requeue_list = NULL;
> > +
> > +	rq_list_for_each_safe(rqlist, req, next) {
> > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > +		unsigned long flags;
> > +		bool kick;
> > +
> > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > +			req = prev;
> > +
> > +			if (!req)
> > +				continue;
> > +		}
> > +
> > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > +			spin_lock_irqsave(&vq->lock, flags);
>
> Did you try calling virtblk_add_req() here to avoid acquiring and
> releasing the lock multiple times? In other words, do virtblk_prep_rq()
> but wait until we get here to do virtblk_add_req().
>
> I don't know if it has any measurable effect on performance or maybe the
> code would become too complex, but I noticed that we're not fully
> exploiting batching.
I tried it as you suggested: I deferred virtblk_add_req() to this if
statement and added all the requests in rqlist to the virtqueue while
holding the lock only once.
The code is attached at the end of this mail; please refer to it.
But I didn't see an improvement. It performed slightly worse than
the current patch.
> > +			kick = virtqueue_kick_prepare(vq->vq);
> > +			spin_unlock_irqrestore(&vq->lock, flags);
> > +			if (kick)
> > +				virtqueue_notify(vq->vq);
> > +
> > +			req->rq_next = NULL;
Is this the part you were asking about?
> > +			*rqlist = next;
> > +			prev = NULL;
> > +		} else
> > +			prev = req;
>
> What guarantees that req is still alive after we called
> virtblk_add_req()? The device may have seen it and completed it already
> by the time we get here.
Isn't the request only completed after the kick?
If you were asking about "req->rq_next = NULL",
I think it should be moved before
"kick = virtqueue_kick_prepare(vq->vq);", like this:
-----------
req->rq_next = NULL;
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
if (kick)
	virtqueue_notify(vq->vq);
-----------
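Applied to the loop in this patch, the tail of the hctx-switch branch
would then read roughly like this (just a sketch of that reordering,
with the lock still taken at the top of the branch as in v3, untested):
-----------
	if (!next || req->mq_hctx != next->mq_hctx) {
		spin_lock_irqsave(&vq->lock, flags);

		/* proposed change: clear rq_next before preparing the kick */
		req->rq_next = NULL;

		kick = virtqueue_kick_prepare(vq->vq);
		spin_unlock_irqrestore(&vq->lock, flags);
		if (kick)
			virtqueue_notify(vq->vq);

		*rqlist = next;
		prev = NULL;
	} else
		prev = req;
-----------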
Regards,
Suwan Kim
---
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2218cab39c72..d972d3042068 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -92,6 +92,7 @@ struct virtio_blk {
 struct virtblk_req {
 	struct virtio_blk_outhdr out_hdr;
 	u8 status;
+	int sg_num;
 	struct sg_table sg_table;
 	struct scatterlist sg[];
 };
@@ -311,18 +312,13 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 		virtqueue_notify(vq->vq);
 }
 
-static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
-			   const struct blk_mq_queue_data *bd)
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+				    struct virtio_blk *vblk,
+				    struct request *req,
+				    struct virtblk_req *vbr)
 {
-	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct request *req = bd->rq;
-	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int num;
-	int qid = hctx->queue_num;
-	bool notify = false;
 	blk_status_t status;
-	int err;
+	int num;
 
 	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
 	if (unlikely(status))
@@ -335,9 +331,30 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		virtblk_cleanup_cmd(req);
 		return BLK_STS_RESOURCE;
 	}
+	vbr->sg_num = num;
+
+	return BLK_STS_OK;
+}
+
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *bd)
+{
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct request *req = bd->rq;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+	unsigned long flags;
+	int qid = hctx->queue_num;
+	bool notify = false;
+	blk_status_t status;
+	int err;
+
+	status = virtblk_prep_rq(hctx, vblk, req, vbr);
+	if (unlikely(status))
+		return status;
 
 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
-	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+	err = virtblk_add_req(vblk->vqs[qid].vq, vbr,
+			      vbr->sg_table.sgl, vbr->sg_num);
 	if (err) {
 		virtqueue_kick(vblk->vqs[qid].vq);
 		/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -367,6 +384,76 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
+static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
+{
+	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	req->mq_hctx->tags->rqs[req->tag] = req;
+
+	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+				  struct request **rqlist,
+				  struct request **requeue_list)
+{
+	unsigned long flags;
+	int err;
+	bool kick;
+
+	spin_lock_irqsave(&vq->lock, flags);
+	while (!rq_list_empty(*rqlist)) {
+		struct request *req = rq_list_pop(rqlist);
+		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+		err = virtblk_add_req(vq->vq, vbr,
+				      vbr->sg_table.sgl, vbr->sg_num);
+		if (err) {
+			virtblk_unmap_data(req, vbr);
+			virtblk_cleanup_cmd(req);
+			rq_list_add(requeue_list, req);
+		}
+	}
+
+	kick = virtqueue_kick_prepare(vq->vq);
+	spin_unlock_irqrestore(&vq->lock, flags);
+
+	return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+	struct request *req, *next, *prev = NULL;
+	struct request *requeue_list = NULL;
+
+	rq_list_for_each_safe(rqlist, req, next) {
+		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+		bool kick;
+
+		if (!virtblk_prep_rq_batch(vq, req)) {
+			rq_list_move(rqlist, &requeue_list, req, prev);
+			req = prev;
+
+			if (!req)
+				continue;
+		}
+
+		if (!next || req->mq_hctx != next->mq_hctx) {
+			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+			if (kick)
+				virtqueue_notify(vq->vq);
+
+			req->rq_next = NULL;
+			*rqlist = next;
+			prev = NULL;
+		} else
+			prev = req;
+	}
+
+	*rqlist = requeue_list;
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -818,6 +905,7 @@ static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
+	.queue_rqs	= virtio_queue_rqs,
 	.commit_rqs	= virtio_commit_rqs,
 	.init_hctx	= virtblk_init_hctx,
 	.complete	= virtblk_request_done,