public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Max Gurtovoy <mgurtovoy@nvidia.com>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: stefanha@redhat.com, virtualization@lists.linux.dev,
	axboe@kernel.dk, kvm@vger.kernel.org,
	linux-block@vger.kernel.org, oren@nvidia.com
Subject: Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
Date: Thu, 1 Aug 2024 18:39:16 +0300	[thread overview]
Message-ID: <9400fb28-47c2-4629-af17-df2a95f2d3d8@nvidia.com> (raw)
In-Reply-To: <20240801112843-mutt-send-email-mst@kernel.org>


On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
>> On 01/08/2024 18:13, Michael S. Tsirkin wrote:
>>> On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
>>>> In this operation set the driver data of the hctx to point to the virtio
>>>> block queue. By doing so, we can use this reference in the and reduce
>>> in the .... ?
>> sorry for the typo.
>>
>> should be :
>>
>> "By doing so, we can use this reference and reduce the number of operations in the fast path."
> ok. what kind of benefit do you see with this patch?

As mentioned. This is a micro optimization that reduces the number of 
instructions/dereferences in the fast path.


>
>>>> the number of operations in the fast path.
>>>>
>>>> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
>>>> ---
>>>>    drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
>>>>    1 file changed, 22 insertions(+), 20 deletions(-)
>>>>
>>>> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
>>>> index 2351f411fa46..35a7a586f6f5 100644
>>>> --- a/drivers/block/virtio_blk.c
>>>> +++ b/drivers/block/virtio_blk.c
>>>> @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
>>>>    	}
>>>>    }
>>>> -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
>>>> -{
>>>> -	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>>>> -
>>>> -	return vq;
>>>> -}
>>>> -
>>>>    static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
>>>>    {
>>>>    	struct scatterlist out_hdr, in_hdr, *sgs[3];
>>>> @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
>>>>    static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
>>>>    {
>>>> -	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>>>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>>>    	bool kick;
>>>>    	spin_lock_irq(&vq->lock);
>>>> @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>>>>    			   const struct blk_mq_queue_data *bd)
>>>>    {
>>>>    	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>>>    	struct request *req = bd->rq;
>>>>    	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
>>>>    	unsigned long flags;
>>>> -	int qid = hctx->queue_num;
>>>>    	bool notify = false;
>>>>    	blk_status_t status;
>>>>    	int err;
>>>> @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>>>>    	if (unlikely(status))
>>>>    		return status;
>>>> -	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
>>>> -	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
>>>> +	spin_lock_irqsave(&vq->lock, flags);
>>>> +	err = virtblk_add_req(vq->vq, vbr);
>>>>    	if (err) {
>>>> -		virtqueue_kick(vblk->vqs[qid].vq);
>>>> +		virtqueue_kick(vq->vq);
>>>>    		/* Don't stop the queue if -ENOMEM: we may have failed to
>>>>    		 * bounce the buffer due to global resource outage.
>>>>    		 */
>>>>    		if (err == -ENOSPC)
>>>>    			blk_mq_stop_hw_queue(hctx);
>>>> -		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>>>> +		spin_unlock_irqrestore(&vq->lock, flags);
>>>>    		virtblk_unmap_data(req, vbr);
>>>>    		return virtblk_fail_to_queue(req, err);
>>>>    	}
>>>> -	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
>>>> +	if (bd->last && virtqueue_kick_prepare(vq->vq))
>>>>    		notify = true;
>>>> -	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>>>> +	spin_unlock_irqrestore(&vq->lock, flags);
>>>>    	if (notify)
>>>> -		virtqueue_notify(vblk->vqs[qid].vq);
>>>> +		virtqueue_notify(vq->vq);
>>>>    	return BLK_STS_OK;
>>>>    }
>>>> @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
>>>>    	struct request *requeue_list = NULL;
>>>>    	rq_list_for_each_safe(rqlist, req, next) {
>>>> -		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
>>>> +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
>>>>    		bool kick;
>>>>    		if (!virtblk_prep_rq_batch(req)) {
>>>> @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
>>>>    	NULL,
>>>>    };
>>>> +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
>>>> +		unsigned int hctx_idx)
>>>> +{
>>>> +	struct virtio_blk *vblk = data;
>>>> +	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
>>>> +
>>>> +	hctx->driver_data = vq;
>>>> +	return 0;
>>>> +}
>>>> +
>>>>    static void virtblk_map_queues(struct blk_mq_tag_set *set)
>>>>    {
>>>>    	struct virtio_blk *vblk = set->driver_data;
>>>> @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
>>>>    static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
>>>>    {
>>>>    	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> -	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
>>>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>>>    	struct virtblk_req *vbr;
>>>>    	unsigned long flags;
>>>>    	unsigned int len;
>>>> @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
>>>>    	.queue_rqs	= virtio_queue_rqs,
>>>>    	.commit_rqs	= virtio_commit_rqs,
>>>>    	.complete	= virtblk_request_done,
>>>> +	.init_hctx	= virtblk_init_hctx,
>>>>    	.map_queues	= virtblk_map_queues,
>>>>    	.poll		= virtblk_poll,
>>>>    };
>>>> -- 
>>>> 2.18.1

  reply	other threads:[~2024-08-01 15:39 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-08-01 15:11 [PATCH 1/1] virtio_blk: implement init_hctx MQ operation Max Gurtovoy
2024-08-01 15:13 ` Michael S. Tsirkin
2024-08-01 15:17   ` Max Gurtovoy
2024-08-01 15:29     ` Michael S. Tsirkin
2024-08-01 15:39       ` Max Gurtovoy [this message]
2024-08-01 15:43         ` Michael S. Tsirkin
     [not found]           ` <6a8f0c72-ba77-42c3-8d85-6bb23a23f025@nvidia.com>
2024-08-01 17:46             ` Michael S. Tsirkin
2024-08-01 17:56             ` Stefan Hajnoczi
2024-08-02 22:07               ` Max Gurtovoy
2024-08-03 12:39                 ` Michael S. Tsirkin
2024-08-03 17:54                   ` Max Gurtovoy
2024-08-07 13:19                     ` Stefan Hajnoczi
2024-08-07 13:34                     ` Michael S. Tsirkin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=9400fb28-47c2-4629-af17-df2a95f2d3d8@nvidia.com \
    --to=mgurtovoy@nvidia.com \
    --cc=axboe@kernel.dk \
    --cc=kvm@vger.kernel.org \
    --cc=linux-block@vger.kernel.org \
    --cc=mst@redhat.com \
    --cc=oren@nvidia.com \
    --cc=stefanha@redhat.com \
    --cc=virtualization@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox