linux-block.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
@ 2024-08-01 15:11 Max Gurtovoy
  2024-08-01 15:13 ` Michael S. Tsirkin
  0 siblings, 1 reply; 13+ messages in thread
From: Max Gurtovoy @ 2024-08-01 15:11 UTC (permalink / raw)
  To: stefanha, virtualization, mst, axboe; +Cc: kvm, linux-block, oren, Max Gurtovoy

In this operation set the driver data of the hctx to point to the virtio
block queue. By doing so, we can use this reference in the and reduce
the number of operations in the fast path.

Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
 drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2351f411fa46..35a7a586f6f5 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
 	}
 }
 
-static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
-{
-	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
-
-	return vq;
-}
-
 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
 {
 	struct scatterlist out_hdr, in_hdr, *sgs[3];
@@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
 
 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 {
-	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+	struct virtio_blk_vq *vq = hctx->driver_data;
 	bool kick;
 
 	spin_lock_irq(&vq->lock);
@@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 			   const struct blk_mq_queue_data *bd)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct virtio_blk_vq *vq = hctx->driver_data;
 	struct request *req = bd->rq;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 	unsigned long flags;
-	int qid = hctx->queue_num;
 	bool notify = false;
 	blk_status_t status;
 	int err;
@@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(status))
 		return status;
 
-	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
-	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
+	spin_lock_irqsave(&vq->lock, flags);
+	err = virtblk_add_req(vq->vq, vbr);
 	if (err) {
-		virtqueue_kick(vblk->vqs[qid].vq);
+		virtqueue_kick(vq->vq);
 		/* Don't stop the queue if -ENOMEM: we may have failed to
 		 * bounce the buffer due to global resource outage.
 		 */
 		if (err == -ENOSPC)
 			blk_mq_stop_hw_queue(hctx);
-		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+		spin_unlock_irqrestore(&vq->lock, flags);
 		virtblk_unmap_data(req, vbr);
 		return virtblk_fail_to_queue(req, err);
 	}
 
-	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
+	if (bd->last && virtqueue_kick_prepare(vq->vq))
 		notify = true;
-	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+	spin_unlock_irqrestore(&vq->lock, flags);
 
 	if (notify)
-		virtqueue_notify(vblk->vqs[qid].vq);
+		virtqueue_notify(vq->vq);
 	return BLK_STS_OK;
 }
 
@@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
 	struct request *requeue_list = NULL;
 
 	rq_list_for_each_safe(rqlist, req, next) {
-		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
+		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
 		bool kick;
 
 		if (!virtblk_prep_rq_batch(req)) {
@@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
 	NULL,
 };
 
+static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+		unsigned int hctx_idx)
+{
+	struct virtio_blk *vblk = data;
+	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
+
+	hctx->driver_data = vq;
+	return 0;
+}
+
 static void virtblk_map_queues(struct blk_mq_tag_set *set)
 {
 	struct virtio_blk *vblk = set->driver_data;
@@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
+	struct virtio_blk_vq *vq = hctx->driver_data;
 	struct virtblk_req *vbr;
 	unsigned long flags;
 	unsigned int len;
@@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rqs	= virtio_queue_rqs,
 	.commit_rqs	= virtio_commit_rqs,
 	.complete	= virtblk_request_done,
+	.init_hctx	= virtblk_init_hctx,
 	.map_queues	= virtblk_map_queues,
 	.poll		= virtblk_poll,
 };
-- 
2.18.1


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-01 15:11 [PATCH 1/1] virtio_blk: implement init_hctx MQ operation Max Gurtovoy
@ 2024-08-01 15:13 ` Michael S. Tsirkin
  2024-08-01 15:17   ` Max Gurtovoy
  0 siblings, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2024-08-01 15:13 UTC (permalink / raw)
  To: Max Gurtovoy; +Cc: stefanha, virtualization, axboe, kvm, linux-block, oren

On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> In this operation set the driver data of the hctx to point to the virtio
> block queue. By doing so, we can use this reference in the and reduce

in the .... ?

> the number of operations in the fast path.
> 
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
> ---
>  drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
>  1 file changed, 22 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index 2351f411fa46..35a7a586f6f5 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
>  	}
>  }
>  
> -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
> -{
> -	struct virtio_blk *vblk = hctx->queue->queuedata;
> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> -
> -	return vq;
> -}
> -
>  static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
>  {
>  	struct scatterlist out_hdr, in_hdr, *sgs[3];
> @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
>  
>  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
>  {
> -	struct virtio_blk *vblk = hctx->queue->queuedata;
> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> +	struct virtio_blk_vq *vq = hctx->driver_data;
>  	bool kick;
>  
>  	spin_lock_irq(&vq->lock);
> @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>  			   const struct blk_mq_queue_data *bd)
>  {
>  	struct virtio_blk *vblk = hctx->queue->queuedata;
> +	struct virtio_blk_vq *vq = hctx->driver_data;
>  	struct request *req = bd->rq;
>  	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
>  	unsigned long flags;
> -	int qid = hctx->queue_num;
>  	bool notify = false;
>  	blk_status_t status;
>  	int err;
> @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>  	if (unlikely(status))
>  		return status;
>  
> -	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
> -	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
> +	spin_lock_irqsave(&vq->lock, flags);
> +	err = virtblk_add_req(vq->vq, vbr);
>  	if (err) {
> -		virtqueue_kick(vblk->vqs[qid].vq);
> +		virtqueue_kick(vq->vq);
>  		/* Don't stop the queue if -ENOMEM: we may have failed to
>  		 * bounce the buffer due to global resource outage.
>  		 */
>  		if (err == -ENOSPC)
>  			blk_mq_stop_hw_queue(hctx);
> -		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
> +		spin_unlock_irqrestore(&vq->lock, flags);
>  		virtblk_unmap_data(req, vbr);
>  		return virtblk_fail_to_queue(req, err);
>  	}
>  
> -	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
> +	if (bd->last && virtqueue_kick_prepare(vq->vq))
>  		notify = true;
> -	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
> +	spin_unlock_irqrestore(&vq->lock, flags);
>  
>  	if (notify)
> -		virtqueue_notify(vblk->vqs[qid].vq);
> +		virtqueue_notify(vq->vq);
>  	return BLK_STS_OK;
>  }
>  
> @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
>  	struct request *requeue_list = NULL;
>  
>  	rq_list_for_each_safe(rqlist, req, next) {
> -		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
> +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
>  		bool kick;
>  
>  		if (!virtblk_prep_rq_batch(req)) {
> @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
>  	NULL,
>  };
>  
> +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
> +		unsigned int hctx_idx)
> +{
> +	struct virtio_blk *vblk = data;
> +	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
> +
> +	hctx->driver_data = vq;
> +	return 0;
> +}
> +
>  static void virtblk_map_queues(struct blk_mq_tag_set *set)
>  {
>  	struct virtio_blk *vblk = set->driver_data;
> @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
>  static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
>  {
>  	struct virtio_blk *vblk = hctx->queue->queuedata;
> -	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
> +	struct virtio_blk_vq *vq = hctx->driver_data;
>  	struct virtblk_req *vbr;
>  	unsigned long flags;
>  	unsigned int len;
> @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
>  	.queue_rqs	= virtio_queue_rqs,
>  	.commit_rqs	= virtio_commit_rqs,
>  	.complete	= virtblk_request_done,
> +	.init_hctx	= virtblk_init_hctx,
>  	.map_queues	= virtblk_map_queues,
>  	.poll		= virtblk_poll,
>  };
> -- 
> 2.18.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-01 15:13 ` Michael S. Tsirkin
@ 2024-08-01 15:17   ` Max Gurtovoy
  2024-08-01 15:29     ` Michael S. Tsirkin
  0 siblings, 1 reply; 13+ messages in thread
From: Max Gurtovoy @ 2024-08-01 15:17 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: stefanha, virtualization, axboe, kvm, linux-block, oren


On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
>> In this operation set the driver data of the hctx to point to the virtio
>> block queue. By doing so, we can use this reference in the and reduce
> in the .... ?

sorry for the typo.

should be :

"By doing so, we can use this reference and reduce the number of operations in the fast path."


>
>> the number of operations in the fast path.
>>
>> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
>> ---
>>   drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
>>   1 file changed, 22 insertions(+), 20 deletions(-)
>>
>> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
>> index 2351f411fa46..35a7a586f6f5 100644
>> --- a/drivers/block/virtio_blk.c
>> +++ b/drivers/block/virtio_blk.c
>> @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
>>   	}
>>   }
>>   
>> -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
>> -{
>> -	struct virtio_blk *vblk = hctx->queue->queuedata;
>> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>> -
>> -	return vq;
>> -}
>> -
>>   static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
>>   {
>>   	struct scatterlist out_hdr, in_hdr, *sgs[3];
>> @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
>>   
>>   static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
>>   {
>> -	struct virtio_blk *vblk = hctx->queue->queuedata;
>> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>   	bool kick;
>>   
>>   	spin_lock_irq(&vq->lock);
>> @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>>   			   const struct blk_mq_queue_data *bd)
>>   {
>>   	struct virtio_blk *vblk = hctx->queue->queuedata;
>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>   	struct request *req = bd->rq;
>>   	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
>>   	unsigned long flags;
>> -	int qid = hctx->queue_num;
>>   	bool notify = false;
>>   	blk_status_t status;
>>   	int err;
>> @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>>   	if (unlikely(status))
>>   		return status;
>>   
>> -	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
>> -	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
>> +	spin_lock_irqsave(&vq->lock, flags);
>> +	err = virtblk_add_req(vq->vq, vbr);
>>   	if (err) {
>> -		virtqueue_kick(vblk->vqs[qid].vq);
>> +		virtqueue_kick(vq->vq);
>>   		/* Don't stop the queue if -ENOMEM: we may have failed to
>>   		 * bounce the buffer due to global resource outage.
>>   		 */
>>   		if (err == -ENOSPC)
>>   			blk_mq_stop_hw_queue(hctx);
>> -		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>> +		spin_unlock_irqrestore(&vq->lock, flags);
>>   		virtblk_unmap_data(req, vbr);
>>   		return virtblk_fail_to_queue(req, err);
>>   	}
>>   
>> -	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
>> +	if (bd->last && virtqueue_kick_prepare(vq->vq))
>>   		notify = true;
>> -	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>> +	spin_unlock_irqrestore(&vq->lock, flags);
>>   
>>   	if (notify)
>> -		virtqueue_notify(vblk->vqs[qid].vq);
>> +		virtqueue_notify(vq->vq);
>>   	return BLK_STS_OK;
>>   }
>>   
>> @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
>>   	struct request *requeue_list = NULL;
>>   
>>   	rq_list_for_each_safe(rqlist, req, next) {
>> -		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
>> +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
>>   		bool kick;
>>   
>>   		if (!virtblk_prep_rq_batch(req)) {
>> @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
>>   	NULL,
>>   };
>>   
>> +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
>> +		unsigned int hctx_idx)
>> +{
>> +	struct virtio_blk *vblk = data;
>> +	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
>> +
>> +	hctx->driver_data = vq;
>> +	return 0;
>> +}
>> +
>>   static void virtblk_map_queues(struct blk_mq_tag_set *set)
>>   {
>>   	struct virtio_blk *vblk = set->driver_data;
>> @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
>>   static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
>>   {
>>   	struct virtio_blk *vblk = hctx->queue->queuedata;
>> -	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>   	struct virtblk_req *vbr;
>>   	unsigned long flags;
>>   	unsigned int len;
>> @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
>>   	.queue_rqs	= virtio_queue_rqs,
>>   	.commit_rqs	= virtio_commit_rqs,
>>   	.complete	= virtblk_request_done,
>> +	.init_hctx	= virtblk_init_hctx,
>>   	.map_queues	= virtblk_map_queues,
>>   	.poll		= virtblk_poll,
>>   };
>> -- 
>> 2.18.1

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-01 15:17   ` Max Gurtovoy
@ 2024-08-01 15:29     ` Michael S. Tsirkin
  2024-08-01 15:39       ` Max Gurtovoy
  0 siblings, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2024-08-01 15:29 UTC (permalink / raw)
  To: Max Gurtovoy; +Cc: stefanha, virtualization, axboe, kvm, linux-block, oren

On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> 
> On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> > On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> > > In this operation set the driver data of the hctx to point to the virtio
> > > block queue. By doing so, we can use this reference in the and reduce
> > in the .... ?
> 
> sorry for the type.
> 
> should be :
> 
> "By doing so, we can use this reference and reduce the number of operations in the fast path."

ok. what kind of benefit do you see with this patch?

> 
> > 
> > > the number of operations in the fast path.
> > > 
> > > Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
> > > ---
> > >   drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
> > >   1 file changed, 22 insertions(+), 20 deletions(-)
> > > 
> > > diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> > > index 2351f411fa46..35a7a586f6f5 100644
> > > --- a/drivers/block/virtio_blk.c
> > > +++ b/drivers/block/virtio_blk.c
> > > @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
> > >   	}
> > >   }
> > > -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
> > > -{
> > > -	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> > > -
> > > -	return vq;
> > > -}
> > > -
> > >   static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
> > >   {
> > >   	struct scatterlist out_hdr, in_hdr, *sgs[3];
> > > @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
> > >   static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
> > >   {
> > > -	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> > > +	struct virtio_blk_vq *vq = hctx->driver_data;
> > >   	bool kick;
> > >   	spin_lock_irq(&vq->lock);
> > > @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> > >   			   const struct blk_mq_queue_data *bd)
> > >   {
> > >   	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > +	struct virtio_blk_vq *vq = hctx->driver_data;
> > >   	struct request *req = bd->rq;
> > >   	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
> > >   	unsigned long flags;
> > > -	int qid = hctx->queue_num;
> > >   	bool notify = false;
> > >   	blk_status_t status;
> > >   	int err;
> > > @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> > >   	if (unlikely(status))
> > >   		return status;
> > > -	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
> > > -	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
> > > +	spin_lock_irqsave(&vq->lock, flags);
> > > +	err = virtblk_add_req(vq->vq, vbr);
> > >   	if (err) {
> > > -		virtqueue_kick(vblk->vqs[qid].vq);
> > > +		virtqueue_kick(vq->vq);
> > >   		/* Don't stop the queue if -ENOMEM: we may have failed to
> > >   		 * bounce the buffer due to global resource outage.
> > >   		 */
> > >   		if (err == -ENOSPC)
> > >   			blk_mq_stop_hw_queue(hctx);
> > > -		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
> > > +		spin_unlock_irqrestore(&vq->lock, flags);
> > >   		virtblk_unmap_data(req, vbr);
> > >   		return virtblk_fail_to_queue(req, err);
> > >   	}
> > > -	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
> > > +	if (bd->last && virtqueue_kick_prepare(vq->vq))
> > >   		notify = true;
> > > -	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
> > > +	spin_unlock_irqrestore(&vq->lock, flags);
> > >   	if (notify)
> > > -		virtqueue_notify(vblk->vqs[qid].vq);
> > > +		virtqueue_notify(vq->vq);
> > >   	return BLK_STS_OK;
> > >   }
> > > @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
> > >   	struct request *requeue_list = NULL;
> > >   	rq_list_for_each_safe(rqlist, req, next) {
> > > -		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
> > > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > >   		bool kick;
> > >   		if (!virtblk_prep_rq_batch(req)) {
> > > @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
> > >   	NULL,
> > >   };
> > > +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
> > > +		unsigned int hctx_idx)
> > > +{
> > > +	struct virtio_blk *vblk = data;
> > > +	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
> > > +
> > > +	hctx->driver_data = vq;
> > > +	return 0;
> > > +}
> > > +
> > >   static void virtblk_map_queues(struct blk_mq_tag_set *set)
> > >   {
> > >   	struct virtio_blk *vblk = set->driver_data;
> > > @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
> > >   static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
> > >   {
> > >   	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > -	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
> > > +	struct virtio_blk_vq *vq = hctx->driver_data;
> > >   	struct virtblk_req *vbr;
> > >   	unsigned long flags;
> > >   	unsigned int len;
> > > @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
> > >   	.queue_rqs	= virtio_queue_rqs,
> > >   	.commit_rqs	= virtio_commit_rqs,
> > >   	.complete	= virtblk_request_done,
> > > +	.init_hctx	= virtblk_init_hctx,
> > >   	.map_queues	= virtblk_map_queues,
> > >   	.poll		= virtblk_poll,
> > >   };
> > > -- 
> > > 2.18.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-01 15:29     ` Michael S. Tsirkin
@ 2024-08-01 15:39       ` Max Gurtovoy
  2024-08-01 15:43         ` Michael S. Tsirkin
  0 siblings, 1 reply; 13+ messages in thread
From: Max Gurtovoy @ 2024-08-01 15:39 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: stefanha, virtualization, axboe, kvm, linux-block, oren


On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
>> On 01/08/2024 18:13, Michael S. Tsirkin wrote:
>>> On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
>>>> In this operation set the driver data of the hctx to point to the virtio
>>>> block queue. By doing so, we can use this reference in the and reduce
>>> in the .... ?
>> sorry for the type.
>>
>> should be :
>>
>> "By doing so, we can use this reference and reduce the number of operations in the fast path."
> ok. what kind of benefit do you see with this patch?

As mentioned, this is a micro optimization that reduces the number of 
instructions/dereferences in the fast path.


>
>>>> the number of operations in the fast path.
>>>>
>>>> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
>>>> ---
>>>>    drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
>>>>    1 file changed, 22 insertions(+), 20 deletions(-)
>>>>
>>>> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
>>>> index 2351f411fa46..35a7a586f6f5 100644
>>>> --- a/drivers/block/virtio_blk.c
>>>> +++ b/drivers/block/virtio_blk.c
>>>> @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
>>>>    	}
>>>>    }
>>>> -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
>>>> -{
>>>> -	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>>>> -
>>>> -	return vq;
>>>> -}
>>>> -
>>>>    static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
>>>>    {
>>>>    	struct scatterlist out_hdr, in_hdr, *sgs[3];
>>>> @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
>>>>    static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
>>>>    {
>>>> -	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>>>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>>>    	bool kick;
>>>>    	spin_lock_irq(&vq->lock);
>>>> @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>>>>    			   const struct blk_mq_queue_data *bd)
>>>>    {
>>>>    	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>>>    	struct request *req = bd->rq;
>>>>    	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
>>>>    	unsigned long flags;
>>>> -	int qid = hctx->queue_num;
>>>>    	bool notify = false;
>>>>    	blk_status_t status;
>>>>    	int err;
>>>> @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>>>>    	if (unlikely(status))
>>>>    		return status;
>>>> -	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
>>>> -	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
>>>> +	spin_lock_irqsave(&vq->lock, flags);
>>>> +	err = virtblk_add_req(vq->vq, vbr);
>>>>    	if (err) {
>>>> -		virtqueue_kick(vblk->vqs[qid].vq);
>>>> +		virtqueue_kick(vq->vq);
>>>>    		/* Don't stop the queue if -ENOMEM: we may have failed to
>>>>    		 * bounce the buffer due to global resource outage.
>>>>    		 */
>>>>    		if (err == -ENOSPC)
>>>>    			blk_mq_stop_hw_queue(hctx);
>>>> -		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>>>> +		spin_unlock_irqrestore(&vq->lock, flags);
>>>>    		virtblk_unmap_data(req, vbr);
>>>>    		return virtblk_fail_to_queue(req, err);
>>>>    	}
>>>> -	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
>>>> +	if (bd->last && virtqueue_kick_prepare(vq->vq))
>>>>    		notify = true;
>>>> -	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>>>> +	spin_unlock_irqrestore(&vq->lock, flags);
>>>>    	if (notify)
>>>> -		virtqueue_notify(vblk->vqs[qid].vq);
>>>> +		virtqueue_notify(vq->vq);
>>>>    	return BLK_STS_OK;
>>>>    }
>>>> @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
>>>>    	struct request *requeue_list = NULL;
>>>>    	rq_list_for_each_safe(rqlist, req, next) {
>>>> -		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
>>>> +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
>>>>    		bool kick;
>>>>    		if (!virtblk_prep_rq_batch(req)) {
>>>> @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
>>>>    	NULL,
>>>>    };
>>>> +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
>>>> +		unsigned int hctx_idx)
>>>> +{
>>>> +	struct virtio_blk *vblk = data;
>>>> +	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
>>>> +
>>>> +	hctx->driver_data = vq;
>>>> +	return 0;
>>>> +}
>>>> +
>>>>    static void virtblk_map_queues(struct blk_mq_tag_set *set)
>>>>    {
>>>>    	struct virtio_blk *vblk = set->driver_data;
>>>> @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
>>>>    static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
>>>>    {
>>>>    	struct virtio_blk *vblk = hctx->queue->queuedata;
>>>> -	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
>>>> +	struct virtio_blk_vq *vq = hctx->driver_data;
>>>>    	struct virtblk_req *vbr;
>>>>    	unsigned long flags;
>>>>    	unsigned int len;
>>>> @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
>>>>    	.queue_rqs	= virtio_queue_rqs,
>>>>    	.commit_rqs	= virtio_commit_rqs,
>>>>    	.complete	= virtblk_request_done,
>>>> +	.init_hctx	= virtblk_init_hctx,
>>>>    	.map_queues	= virtblk_map_queues,
>>>>    	.poll		= virtblk_poll,
>>>>    };
>>>> -- 
>>>> 2.18.1

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-01 15:39       ` Max Gurtovoy
@ 2024-08-01 15:43         ` Michael S. Tsirkin
       [not found]           ` <6a8f0c72-ba77-42c3-8d85-6bb23a23f025@nvidia.com>
  0 siblings, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2024-08-01 15:43 UTC (permalink / raw)
  To: Max Gurtovoy; +Cc: stefanha, virtualization, axboe, kvm, linux-block, oren

On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
> 
> On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> > On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> > > On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> > > > On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> > > > > In this operation set the driver data of the hctx to point to the virtio
> > > > > block queue. By doing so, we can use this reference in the and reduce
> > > > in the .... ?
> > > sorry for the type.
> > > 
> > > should be :
> > > 
> > > "By doing so, we can use this reference and reduce the number of operations in the fast path."
> > ok. what kind of benefit do you see with this patch?
> 
> As mentioned. This is a micro optimization that reduce the number of
> instructions/dereferences in the fast path.

By how much? How random code tweaks affect object code is unpredictable.
Pls show results of objdump to prove it does anything
useful.

> 
> > 
> > > > > the number of operations in the fast path.
> > > > > 
> > > > > Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
> > > > > ---
> > > > >    drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
> > > > >    1 file changed, 22 insertions(+), 20 deletions(-)
> > > > > 
> > > > > diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> > > > > index 2351f411fa46..35a7a586f6f5 100644
> > > > > --- a/drivers/block/virtio_blk.c
> > > > > +++ b/drivers/block/virtio_blk.c
> > > > > @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
> > > > >    	}
> > > > >    }
> > > > > -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
> > > > > -{
> > > > > -	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > > > -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> > > > > -
> > > > > -	return vq;
> > > > > -}
> > > > > -
> > > > >    static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
> > > > >    {
> > > > >    	struct scatterlist out_hdr, in_hdr, *sgs[3];
> > > > > @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
> > > > >    static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
> > > > >    {
> > > > > -	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > > > -	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
> > > > > +	struct virtio_blk_vq *vq = hctx->driver_data;
> > > > >    	bool kick;
> > > > >    	spin_lock_irq(&vq->lock);
> > > > > @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> > > > >    			   const struct blk_mq_queue_data *bd)
> > > > >    {
> > > > >    	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > > > +	struct virtio_blk_vq *vq = hctx->driver_data;
> > > > >    	struct request *req = bd->rq;
> > > > >    	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
> > > > >    	unsigned long flags;
> > > > > -	int qid = hctx->queue_num;
> > > > >    	bool notify = false;
> > > > >    	blk_status_t status;
> > > > >    	int err;
> > > > > @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> > > > >    	if (unlikely(status))
> > > > >    		return status;
> > > > > -	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
> > > > > -	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
> > > > > +	spin_lock_irqsave(&vq->lock, flags);
> > > > > +	err = virtblk_add_req(vq->vq, vbr);
> > > > >    	if (err) {
> > > > > -		virtqueue_kick(vblk->vqs[qid].vq);
> > > > > +		virtqueue_kick(vq->vq);
> > > > >    		/* Don't stop the queue if -ENOMEM: we may have failed to
> > > > >    		 * bounce the buffer due to global resource outage.
> > > > >    		 */
> > > > >    		if (err == -ENOSPC)
> > > > >    			blk_mq_stop_hw_queue(hctx);
> > > > > -		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
> > > > > +		spin_unlock_irqrestore(&vq->lock, flags);
> > > > >    		virtblk_unmap_data(req, vbr);
> > > > >    		return virtblk_fail_to_queue(req, err);
> > > > >    	}
> > > > > -	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
> > > > > +	if (bd->last && virtqueue_kick_prepare(vq->vq))
> > > > >    		notify = true;
> > > > > -	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
> > > > > +	spin_unlock_irqrestore(&vq->lock, flags);
> > > > >    	if (notify)
> > > > > -		virtqueue_notify(vblk->vqs[qid].vq);
> > > > > +		virtqueue_notify(vq->vq);
> > > > >    	return BLK_STS_OK;
> > > > >    }
> > > > > @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
> > > > >    	struct request *requeue_list = NULL;
> > > > >    	rq_list_for_each_safe(rqlist, req, next) {
> > > > > -		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
> > > > > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > > > >    		bool kick;
> > > > >    		if (!virtblk_prep_rq_batch(req)) {
> > > > > @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
> > > > >    	NULL,
> > > > >    };
> > > > > +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
> > > > > +		unsigned int hctx_idx)
> > > > > +{
> > > > > +	struct virtio_blk *vblk = data;
> > > > > +	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
> > > > > +
> > > > > +	hctx->driver_data = vq;
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > >    static void virtblk_map_queues(struct blk_mq_tag_set *set)
> > > > >    {
> > > > >    	struct virtio_blk *vblk = set->driver_data;
> > > > > @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
> > > > >    static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
> > > > >    {
> > > > >    	struct virtio_blk *vblk = hctx->queue->queuedata;
> > > > > -	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
> > > > > +	struct virtio_blk_vq *vq = hctx->driver_data;
> > > > >    	struct virtblk_req *vbr;
> > > > >    	unsigned long flags;
> > > > >    	unsigned int len;
> > > > > @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
> > > > >    	.queue_rqs	= virtio_queue_rqs,
> > > > >    	.commit_rqs	= virtio_commit_rqs,
> > > > >    	.complete	= virtblk_request_done,
> > > > > +	.init_hctx	= virtblk_init_hctx,
> > > > >    	.map_queues	= virtblk_map_queues,
> > > > >    	.poll		= virtblk_poll,
> > > > >    };
> > > > > -- 
> > > > > 2.18.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
       [not found]           ` <6a8f0c72-ba77-42c3-8d85-6bb23a23f025@nvidia.com>
@ 2024-08-01 17:46             ` Michael S. Tsirkin
  2024-08-01 17:56             ` Stefan Hajnoczi
  1 sibling, 0 replies; 13+ messages in thread
From: Michael S. Tsirkin @ 2024-08-01 17:46 UTC (permalink / raw)
  To: Max Gurtovoy; +Cc: stefanha, virtualization, axboe, kvm, linux-block, oren

On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
> 
> On 01/08/2024 18:43, Michael S. Tsirkin wrote:
> 
>     On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
> 
>         On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> 
>             On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> 
>                 On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> 
>                     On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> 
>                         In this operation set the driver data of the hctx to point to the virtio
>                         block queue. By doing so, we can use this reference in the and reduce
> 
>                     in the .... ?
> 
>                 sorry for the type.
> 
>                 should be :
> 
>                 "By doing so, we can use this reference and reduce the number of operations in the fast path."
> 
>             ok. what kind of benefit do you see with this patch?
> 
>         As mentioned. This is a micro optimization that reduce the number of
>         instructions/dereferences in the fast path.
> 
>     By how much? How random code tweaks affect object code is unpredictable.
>     Pls show results of objdump to prove it does anything
>     useful.
> 
> This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
> driver_data.
> 
> These drivers don't have driver specific mechanisms to find the queue from the 
> hctx->queue->queuedata like vblk driver has for some unknown reason.
> 
> It is pretty easy to review this patch and see its benefits, isn't it ?
> 
> It is not expected to provide extreme perf improvement.
> 
> It is introduced for aligning the driver to use common MQ mechanisms and reduce
> dereferences.
> 
> This is not "random code tweaks".


Then pls say so in the commit log.

Look I don't have anything for or against this patch.

I do however want to establish that if something is billed as
an "optimization" it has to come with numbers (even if
it's as simple as "size" run on the object file).

If it's just cleaner/simpler, say so.


I'll wait for an ack from Paolo/Stefan, anyway.



>                         the number of operations in the fast path.
> 
>                         Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
>                         ---
>                            drivers/block/virtio_blk.c | 42 ++++++++++++++++++++------------------
>                            1 file changed, 22 insertions(+), 20 deletions(-)
> 
>                         diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
>                         index 2351f411fa46..35a7a586f6f5 100644
>                         --- a/drivers/block/virtio_blk.c
>                         +++ b/drivers/block/virtio_blk.c
>                         @@ -129,14 +129,6 @@ static inline blk_status_t virtblk_result(u8 status)
>                                 }
>                            }
>                         -static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
>                         -{
>                         -       struct virtio_blk *vblk = hctx->queue->queuedata;
>                         -       struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>                         -
>                         -       return vq;
>                         -}
>                         -
>                            static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
>                            {
>                                 struct scatterlist out_hdr, in_hdr, *sgs[3];
>                         @@ -377,8 +369,7 @@ static void virtblk_done(struct virtqueue *vq)
>                            static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
>                            {
>                         -       struct virtio_blk *vblk = hctx->queue->queuedata;
>                         -       struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
>                         +       struct virtio_blk_vq *vq = hctx->driver_data;
>                                 bool kick;
>                                 spin_lock_irq(&vq->lock);
>                         @@ -428,10 +419,10 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>                                                    const struct blk_mq_queue_data *bd)
>                            {
>                                 struct virtio_blk *vblk = hctx->queue->queuedata;
>                         +       struct virtio_blk_vq *vq = hctx->driver_data;
>                                 struct request *req = bd->rq;
>                                 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
>                                 unsigned long flags;
>                         -       int qid = hctx->queue_num;
>                                 bool notify = false;
>                                 blk_status_t status;
>                                 int err;
>                         @@ -440,26 +431,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
>                                 if (unlikely(status))
>                                         return status;
>                         -       spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
>                         -       err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
>                         +       spin_lock_irqsave(&vq->lock, flags);
>                         +       err = virtblk_add_req(vq->vq, vbr);
>                                 if (err) {
>                         -               virtqueue_kick(vblk->vqs[qid].vq);
>                         +               virtqueue_kick(vq->vq);
>                                         /* Don't stop the queue if -ENOMEM: we may have failed to
>                                          * bounce the buffer due to global resource outage.
>                                          */
>                                         if (err == -ENOSPC)
>                                                 blk_mq_stop_hw_queue(hctx);
>                         -               spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>                         +               spin_unlock_irqrestore(&vq->lock, flags);
>                                         virtblk_unmap_data(req, vbr);
>                                         return virtblk_fail_to_queue(req, err);
>                                 }
>                         -       if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
>                         +       if (bd->last && virtqueue_kick_prepare(vq->vq))
>                                         notify = true;
>                         -       spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
>                         +       spin_unlock_irqrestore(&vq->lock, flags);
>                                 if (notify)
>                         -               virtqueue_notify(vblk->vqs[qid].vq);
>                         +               virtqueue_notify(vq->vq);
>                                 return BLK_STS_OK;
>                            }
>                         @@ -504,7 +495,7 @@ static void virtio_queue_rqs(struct request **rqlist)
>                                 struct request *requeue_list = NULL;
>                                 rq_list_for_each_safe(rqlist, req, next) {
>                         -               struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
>                         +               struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
>                                         bool kick;
>                                         if (!virtblk_prep_rq_batch(req)) {
>                         @@ -1164,6 +1155,16 @@ static const struct attribute_group *virtblk_attr_groups[] = {
>                                 NULL,
>                            };
>                         +static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
>                         +               unsigned int hctx_idx)
>                         +{
>                         +       struct virtio_blk *vblk = data;
>                         +       struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
>                         +
>                         +       hctx->driver_data = vq;
>                         +       return 0;
>                         +}
>                         +
>                            static void virtblk_map_queues(struct blk_mq_tag_set *set)
>                            {
>                                 struct virtio_blk *vblk = set->driver_data;
>                         @@ -1205,7 +1206,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
>                            static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
>                            {
>                                 struct virtio_blk *vblk = hctx->queue->queuedata;
>                         -       struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
>                         +       struct virtio_blk_vq *vq = hctx->driver_data;
>                                 struct virtblk_req *vbr;
>                                 unsigned long flags;
>                                 unsigned int len;
>                         @@ -1236,6 +1237,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
>                                 .queue_rqs      = virtio_queue_rqs,
>                                 .commit_rqs     = virtio_commit_rqs,
>                                 .complete       = virtblk_request_done,
>                         +       .init_hctx      = virtblk_init_hctx,
>                                 .map_queues     = virtblk_map_queues,
>                                 .poll           = virtblk_poll,
>                            };
>                         --
>                         2.18.1
> 


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
       [not found]           ` <6a8f0c72-ba77-42c3-8d85-6bb23a23f025@nvidia.com>
  2024-08-01 17:46             ` Michael S. Tsirkin
@ 2024-08-01 17:56             ` Stefan Hajnoczi
  2024-08-02 22:07               ` Max Gurtovoy
  1 sibling, 1 reply; 13+ messages in thread
From: Stefan Hajnoczi @ 2024-08-01 17:56 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: Michael S. Tsirkin, virtualization, axboe, kvm, linux-block, oren

[-- Attachment #1: Type: text/plain, Size: 2021 bytes --]

On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
> 
> On 01/08/2024 18:43, Michael S. Tsirkin wrote:
> > On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
> > > On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> > > > On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> > > > > On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> > > > > > On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> > > > > > > In this operation set the driver data of the hctx to point to the virtio
> > > > > > > block queue. By doing so, we can use this reference in the and reduce
> > > > > > in the .... ?
> > > > > sorry for the typo.
> > > > > 
> > > > > should be :
> > > > > 
> > > > > "By doing so, we can use this reference and reduce the number of operations in the fast path."
> > > > ok. what kind of benefit do you see with this patch?
> > > As mentioned. This is a micro optimization that reduces the number of
> > > instructions/dereferences in the fast path.
> > By how much? How random code tweaks affect object code is unpredictable.
> > Pls show results of objdump to prove it does anything
> > useful.
> 
> This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
> driver_data.
> 
> These drivers don't have driver specific mechanisms to find the queue from
> the hctx->queue->queuedata like vblk driver has for some unknown reason.
> 
> It is pretty easy to review this patch and see its benefits, isn't it ?
> 
> It is not expected to provide extreme perf improvement.
> 
> It is introduced for aligning the driver to use common MQ mechanisms and
> reduce dereferences.
> 
> This is not "random code tweaks".

If you cannot observe a performance change, then adjusting the commit
description to explain this as a code cleanup to reduce dereferences and
local variables, improving code readability seems fine to me. I think
it's a nice cleanup when presented as such rather than a performance
optimization.

Stefan

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-01 17:56             ` Stefan Hajnoczi
@ 2024-08-02 22:07               ` Max Gurtovoy
  2024-08-03 12:39                 ` Michael S. Tsirkin
  0 siblings, 1 reply; 13+ messages in thread
From: Max Gurtovoy @ 2024-08-02 22:07 UTC (permalink / raw)
  To: Stefan Hajnoczi
  Cc: Michael S. Tsirkin, virtualization, axboe, kvm, linux-block, oren


On 01/08/2024 20:56, Stefan Hajnoczi wrote:
> On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
>> On 01/08/2024 18:43, Michael S. Tsirkin wrote:
>>> On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
>>>> On 01/08/2024 18:29, Michael S. Tsirkin wrote:
>>>>> On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
>>>>>> On 01/08/2024 18:13, Michael S. Tsirkin wrote:
>>>>>>> On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
>>>>>>>> In this operation set the driver data of the hctx to point to the virtio
>>>>>>>> block queue. By doing so, we can use this reference in the and reduce
>>>>>>> in the .... ?
>>>>>> sorry for the type.
>>>>>>
>>>>>> should be :
>>>>>>
>>>>>> "By doing so, we can use this reference and reduce the number of operations in the fast path."
>>>>> ok. what kind of benefit do you see with this patch?
>>>> As mentioned. This is a micro optimization that reduce the number of
>>>> instructions/dereferences in the fast path.
>>> By how much? How random code tweaks affect object code is unpredictable.
>>> Pls show results of objdump to prove it does anything
>>> useful.
>> This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
>> driver_data.
>>
>> These drivers don't have driver specific mechanisms to find the queue from
>> the hctx->queue->queuedata like vblk driver has for some unknown reason.
>>
>> It is pretty easy to review this patch and see its benefits, isn't it ?
>>
>> It is not expected to provide extreme perf improvement.
>>
>> It is introduced for aligning the driver to use common MQ mechanisms and
>> reduce dereferences.
>>
>> This is not "random code tweaks".
> If you cannot observe a performance change, then adjusting the commit
> description to explain this as a code cleanup to reduce dereferences and
> local variables, improving code readability seems fine to me. I think
> it's a nice cleanup when presented as such rather than a performance
> optimization.
>
> Stefan

Sure. Please check the below adjustment:

virtio_blk: implement init_hctx MQ operation

Set the driver data of the hardware context (hctx) to point directly to
the virtio block queue. This cleanup improves code readability, reduces
the number of dereferences, and minimizes local variables in the fast
path.



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-02 22:07               ` Max Gurtovoy
@ 2024-08-03 12:39                 ` Michael S. Tsirkin
  2024-08-03 17:54                   ` Max Gurtovoy
  0 siblings, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2024-08-03 12:39 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: Stefan Hajnoczi, virtualization, axboe, kvm, linux-block, oren

On Sat, Aug 03, 2024 at 01:07:27AM +0300, Max Gurtovoy wrote:
> 
> On 01/08/2024 20:56, Stefan Hajnoczi wrote:
> > On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
> > > On 01/08/2024 18:43, Michael S. Tsirkin wrote:
> > > > On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
> > > > > On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> > > > > > On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> > > > > > > On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> > > > > > > > On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> > > > > > > > > In this operation set the driver data of the hctx to point to the virtio
> > > > > > > > > block queue. By doing so, we can use this reference in the and reduce
> > > > > > > > in the .... ?
> > > > > > > sorry for the type.
> > > > > > > 
> > > > > > > should be :
> > > > > > > 
> > > > > > > "By doing so, we can use this reference and reduce the number of operations in the fast path."
> > > > > > ok. what kind of benefit do you see with this patch?
> > > > > As mentioned. This is a micro optimization that reduce the number of
> > > > > instructions/dereferences in the fast path.
> > > > By how much? How random code tweaks affect object code is unpredictable.
> > > > Pls show results of objdump to prove it does anything
> > > > useful.
> > > This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
> > > driver_data.
> > > 
> > > These drivers don't have driver specific mechanisms to find the queue from
> > > the hctx->queue->queuedata like vblk driver has for some unknown reason.
> > > 
> > > It is pretty easy to review this patch and see its benefits, isn't it ?
> > > 
> > > It is not expected to provide extreme perf improvement.
> > > 
> > > It is introduced for aligning the driver to use common MQ mechanisms and
> > > reduce dereferences.
> > > 
> > > This is not "random code tweaks".
> > If you cannot observe a performance change, then adjusting the commit
> > description to explain this as a code cleanup to reduce dereferences and
> > local variables, improving code readability seems fine to me. I think
> > it's a nice cleanup when presented as such rather than a performance
> > optimization.
> > 
> > Stefan
> 
> Sure. Please check the bellow adjustment:
> 
> virtio_blk: implement init_hctx MQ operation
> 
> Set the driver data of the hardware context (hctx) to point directly to
> the virtio block queue. This cleanup improves code readability, reduces
> the number of dereferences, and minimizes local variables in the fast
> path.

I'd drop the local variables part, it is not at all clear why that is
a win.

-- 
MST


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-03 12:39                 ` Michael S. Tsirkin
@ 2024-08-03 17:54                   ` Max Gurtovoy
  2024-08-07 13:19                     ` Stefan Hajnoczi
  2024-08-07 13:34                     ` Michael S. Tsirkin
  0 siblings, 2 replies; 13+ messages in thread
From: Max Gurtovoy @ 2024-08-03 17:54 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: Stefan Hajnoczi, virtualization, axboe, kvm, linux-block, oren


On 03/08/2024 15:39, Michael S. Tsirkin wrote:
> On Sat, Aug 03, 2024 at 01:07:27AM +0300, Max Gurtovoy wrote:
>> On 01/08/2024 20:56, Stefan Hajnoczi wrote:
>>> On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
>>>> On 01/08/2024 18:43, Michael S. Tsirkin wrote:
>>>>> On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
>>>>>> On 01/08/2024 18:29, Michael S. Tsirkin wrote:
>>>>>>> On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
>>>>>>>> On 01/08/2024 18:13, Michael S. Tsirkin wrote:
>>>>>>>>> On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
>>>>>>>>>> In this operation set the driver data of the hctx to point to the virtio
>>>>>>>>>> block queue. By doing so, we can use this reference in the and reduce
>>>>>>>>> in the .... ?
>>>>>>>> sorry for the type.
>>>>>>>>
>>>>>>>> should be :
>>>>>>>>
>>>>>>>> "By doing so, we can use this reference and reduce the number of operations in the fast path."
>>>>>>> ok. what kind of benefit do you see with this patch?
>>>>>> As mentioned. This is a micro optimization that reduce the number of
>>>>>> instructions/dereferences in the fast path.
>>>>> By how much? How random code tweaks affect object code is unpredictable.
>>>>> Pls show results of objdump to prove it does anything
>>>>> useful.
>>>> This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
>>>> driver_data.
>>>>
>>>> These drivers don't have driver specific mechanisms to find the queue from
>>>> the hctx->queue->queuedata like vblk driver has for some unknown reason.
>>>>
>>>> It is pretty easy to review this patch and see its benefits, isn't it ?
>>>>
>>>> It is not expected to provide extreme perf improvement.
>>>>
>>>> It is introduced for aligning the driver to use common MQ mechanisms and
>>>> reduce dereferences.
>>>>
>>>> This is not "random code tweaks".
>>> If you cannot observe a performance change, then adjusting the commit
>>> description to explain this as a code cleanup to reduce dereferences and
>>> local variables, improving code readability seems fine to me. I think
>>> it's a nice cleanup when presented as such rather than a performance
>>> optimization.
>>>
>>> Stefan
>> Sure. Please check the bellow adjustment:
>>
>> virtio_blk: implement init_hctx MQ operation
>>
>> Set the driver data of the hardware context (hctx) to point directly to
>> the virtio block queue. This cleanup improves code readability, reduces
>> the number of dereferences, and minimizes local variables in the fast
>> path.
> I'd drop the local variables part, it is not at all clear why is that
> a win.

We can drop it:

virtio_blk: implement init_hctx MQ operation

Set the driver data of the hardware context (hctx) to point directly to
the virtio block queue. This cleanup improves code readability and reduces
the number of dereferences in the fast path.



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-03 17:54                   ` Max Gurtovoy
@ 2024-08-07 13:19                     ` Stefan Hajnoczi
  2024-08-07 13:34                     ` Michael S. Tsirkin
  1 sibling, 0 replies; 13+ messages in thread
From: Stefan Hajnoczi @ 2024-08-07 13:19 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: Michael S. Tsirkin, virtualization, axboe, kvm, linux-block, oren

[-- Attachment #1: Type: text/plain, Size: 3376 bytes --]

On Sat, Aug 03, 2024 at 08:54:45PM +0300, Max Gurtovoy wrote:
> 
> On 03/08/2024 15:39, Michael S. Tsirkin wrote:
> > On Sat, Aug 03, 2024 at 01:07:27AM +0300, Max Gurtovoy wrote:
> > > On 01/08/2024 20:56, Stefan Hajnoczi wrote:
> > > > On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
> > > > > On 01/08/2024 18:43, Michael S. Tsirkin wrote:
> > > > > > On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
> > > > > > > On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> > > > > > > > On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> > > > > > > > > On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> > > > > > > > > > On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> > > > > > > > > > > In this operation set the driver data of the hctx to point to the virtio
> > > > > > > > > > > block queue. By doing so, we can use this reference in the and reduce
> > > > > > > > > > in the .... ?
> > > > > > > > > sorry for the type.
> > > > > > > > > 
> > > > > > > > > should be :
> > > > > > > > > 
> > > > > > > > > "By doing so, we can use this reference and reduce the number of operations in the fast path."
> > > > > > > > ok. what kind of benefit do you see with this patch?
> > > > > > > As mentioned. This is a micro optimization that reduce the number of
> > > > > > > instructions/dereferences in the fast path.
> > > > > > By how much? How random code tweaks affect object code is unpredictable.
> > > > > > Pls show results of objdump to prove it does anything
> > > > > > useful.
> > > > > This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
> > > > > driver_data.
> > > > > 
> > > > > These drivers don't have driver specific mechanisms to find the queue from
> > > > > the hctx->queue->queuedata like vblk driver has for some unknown reason.
> > > > > 
> > > > > It is pretty easy to review this patch and see its benefits, isn't it ?
> > > > > 
> > > > > It is not expected to provide extreme perf improvement.
> > > > > 
> > > > > It is introduced for aligning the driver to use common MQ mechanisms and
> > > > > reduce dereferences.
> > > > > 
> > > > > This is not "random code tweaks".
> > > > If you cannot observe a performance change, then adjusting the commit
> > > > description to explain this as a code cleanup to reduce dereferences and
> > > > local variables, improving code readability seems fine to me. I think
> > > > it's a nice cleanup when presented as such rather than a performance
> > > > optimization.
> > > > 
> > > > Stefan
> > > Sure. Please check the bellow adjustment:
> > > 
> > > virtio_blk: implement init_hctx MQ operation
> > > 
> > > Set the driver data of the hardware context (hctx) to point directly to
> > > the virtio block queue. This cleanup improves code readability, reduces
> > > the number of dereferences, and minimizes local variables in the fast
> > > path.
> > I'd drop the local variables part, it is not at all clear why is that
> > a win.
> 
> We can drop it:
> 
> virtio_blk: implement init_hctx MQ operation
> 
> Set the driver data of the hardware context (hctx) to point directly to
> the virtio block queue. This cleanup improves code readability and reduces
> the number of dereferences in the fast path.
> 
> 

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 1/1] virtio_blk: implement init_hctx MQ operation
  2024-08-03 17:54                   ` Max Gurtovoy
  2024-08-07 13:19                     ` Stefan Hajnoczi
@ 2024-08-07 13:34                     ` Michael S. Tsirkin
  1 sibling, 0 replies; 13+ messages in thread
From: Michael S. Tsirkin @ 2024-08-07 13:34 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: Stefan Hajnoczi, virtualization, axboe, kvm, linux-block, oren

On Sat, Aug 03, 2024 at 08:54:45PM +0300, Max Gurtovoy wrote:
> 
> On 03/08/2024 15:39, Michael S. Tsirkin wrote:
> > On Sat, Aug 03, 2024 at 01:07:27AM +0300, Max Gurtovoy wrote:
> > > On 01/08/2024 20:56, Stefan Hajnoczi wrote:
> > > > On Thu, Aug 01, 2024 at 06:56:44PM +0300, Max Gurtovoy wrote:
> > > > > On 01/08/2024 18:43, Michael S. Tsirkin wrote:
> > > > > > On Thu, Aug 01, 2024 at 06:39:16PM +0300, Max Gurtovoy wrote:
> > > > > > > On 01/08/2024 18:29, Michael S. Tsirkin wrote:
> > > > > > > > On Thu, Aug 01, 2024 at 06:17:21PM +0300, Max Gurtovoy wrote:
> > > > > > > > > On 01/08/2024 18:13, Michael S. Tsirkin wrote:
> > > > > > > > > > On Thu, Aug 01, 2024 at 06:11:37PM +0300, Max Gurtovoy wrote:
> > > > > > > > > > > In this operation set the driver data of the hctx to point to the virtio
> > > > > > > > > > > block queue. By doing so, we can use this reference in the and reduce
> > > > > > > > > > in the .... ?
> > > > > > > > > sorry for the type.
> > > > > > > > > 
> > > > > > > > > should be :
> > > > > > > > > 
> > > > > > > > > "By doing so, we can use this reference and reduce the number of operations in the fast path."
> > > > > > > > ok. what kind of benefit do you see with this patch?
> > > > > > > As mentioned. This is a micro optimization that reduce the number of
> > > > > > > instructions/dereferences in the fast path.
> > > > > > By how much? How random code tweaks affect object code is unpredictable.
> > > > > > Pls show results of objdump to prove it does anything
> > > > > > useful.
> > > > > This is the way all modern block drivers such as NVMe PCI/RDMA/TCP use the
> > > > > driver_data.
> > > > > 
> > > > > These drivers don't have driver specific mechanisms to find the queue from
> > > > > the hctx->queue->queuedata like vblk driver has for some unknown reason.
> > > > > 
> > > > > It is pretty easy to review this patch and see its benefits, isn't it ?
> > > > > 
> > > > > It is not expected to provide extreme perf improvement.
> > > > > 
> > > > > It is introduced for aligning the driver to use common MQ mechanisms and
> > > > > reduce dereferences.
> > > > > 
> > > > > This is not "random code tweaks".
> > > > If you cannot observe a performance change, then adjusting the commit
> > > > description to explain this as a code cleanup to reduce dereferences and
> > > > local variables, improving code readability seems fine to me. I think
> > > > it's a nice cleanup when presented as such rather than a performance
> > > > optimization.
> > > > 
> > > > Stefan
> > > Sure. Please check the bellow adjustment:
> > > 
> > > virtio_blk: implement init_hctx MQ operation
> > > 
> > > Set the driver data of the hardware context (hctx) to point directly to
> > > the virtio block queue. This cleanup improves code readability, reduces
> > > the number of dereferences, and minimizes local variables in the fast
> > > path.
> > I'd drop the local variables part, it is not at all clear why is that
> > a win.
> 
> We can drop it:
> 
> virtio_blk: implement init_hctx MQ operation
> 
> Set the driver data of the hardware context (hctx) to point directly to
> the virtio block queue. This cleanup improves code readability and reduces
> the number of dereferences in the fast path.
> 


yep. also pls drop 1/1 from subject. Just [PATCH vX]

pls repost with these commit log tweaks, I will queue.
-- 
MST


^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2024-08-07 13:34 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-08-01 15:11 [PATCH 1/1] virtio_blk: implement init_hctx MQ operation Max Gurtovoy
2024-08-01 15:13 ` Michael S. Tsirkin
2024-08-01 15:17   ` Max Gurtovoy
2024-08-01 15:29     ` Michael S. Tsirkin
2024-08-01 15:39       ` Max Gurtovoy
2024-08-01 15:43         ` Michael S. Tsirkin
     [not found]           ` <6a8f0c72-ba77-42c3-8d85-6bb23a23f025@nvidia.com>
2024-08-01 17:46             ` Michael S. Tsirkin
2024-08-01 17:56             ` Stefan Hajnoczi
2024-08-02 22:07               ` Max Gurtovoy
2024-08-03 12:39                 ` Michael S. Tsirkin
2024-08-03 17:54                   ` Max Gurtovoy
2024-08-07 13:19                     ` Stefan Hajnoczi
2024-08-07 13:34                     ` Michael S. Tsirkin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).