* [PATCH] virtio-blk: Avoid use-after-free on suspend/resume
@ 2022-07-31  7:09 Shigeru Yoshida
From: Shigeru Yoshida @ 2022-07-31  7:09 UTC
  To: mst, jasowang, axboe
  Cc: pbonzini, stefanha, virtualization, linux-block, linux-kernel,
	Shigeru Yoshida

hctx->driver_data is set to vq in virtblk_init_hctx().  However, the
vqs are freed on suspend and reallocated on resume, so after resume
hctx->driver_data points to freed memory.  The resulting use-after-free
access crashes the kernel with a trace like the one below:

[   22.428391] Call Trace:
[   22.428899]  <TASK>
[   22.429339]  virtqueue_add_split+0x3eb/0x620
[   22.430035]  ? __blk_mq_alloc_requests+0x17f/0x2d0
[   22.430789]  ? kvm_clock_get_cycles+0x14/0x30
[   22.431496]  virtqueue_add_sgs+0xad/0xd0
[   22.432108]  virtblk_add_req+0xe8/0x150
[   22.432692]  virtio_queue_rqs+0xeb/0x210
[   22.433330]  blk_mq_flush_plug_list+0x1b8/0x280
[   22.434059]  __blk_flush_plug+0xe1/0x140
[   22.434853]  blk_finish_plug+0x20/0x40
[   22.435512]  read_pages+0x20a/0x2e0
[   22.436063]  ? folio_add_lru+0x62/0xa0
[   22.436652]  page_cache_ra_unbounded+0x112/0x160
[   22.437365]  filemap_get_pages+0xe1/0x5b0
[   22.437964]  ? context_to_sid+0x70/0x100
[   22.438580]  ? sidtab_context_to_sid+0x32/0x400
[   22.439979]  filemap_read+0xcd/0x3d0
[   22.440917]  xfs_file_buffered_read+0x4a/0xc0
[   22.441984]  xfs_file_read_iter+0x65/0xd0
[   22.442970]  __kernel_read+0x160/0x2e0
[   22.443921]  bprm_execve+0x21b/0x640
[   22.444809]  do_execveat_common.isra.0+0x1a8/0x220
[   22.446008]  __x64_sys_execve+0x2d/0x40
[   22.446920]  do_syscall_64+0x37/0x90
[   22.447773]  entry_SYSCALL_64_after_hwframe+0x63/0xcd

Fix this by looking the vq up through vblk (vblk->vqs[hctx->queue_num])
at each use instead of caching it; this makes virtblk_init_hctx()
unnecessary, so remove it as well.

Signed-off-by: Shigeru Yoshida <shigeru.yoshida@gmail.com>
---
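(A note for reviewers: below is a condensed sketch of the suspend/resume
paths that invalidate the cached pointer.  It is simplified from the
driver's actual freeze/restore handlers; error handling, config-work
flushing, and other details are omitted, so treat it as illustrative
rather than exact.)

static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Stop the device and drain in-flight requests. */
	virtio_reset_device(vdev);
	blk_mq_quiesce_queue(vblk->disk->queue);

	/*
	 * Tear down and free the vqs.  Any hctx->driver_data cached by
	 * virtblk_init_hctx() now points into freed memory.
	 */
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	/*
	 * init_vq() kmallocs a fresh vblk->vqs array, usually at a new
	 * address, but ->init_hctx() is not invoked again, so any stale
	 * cached pointer is never refreshed.
	 */
	ret = init_vq(vblk);
	if (ret)
		return ret;

	virtio_device_ready(vdev);
	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
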
 drivers/block/virtio_blk.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6fc7850c2b0a..d756423e0059 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -101,6 +101,14 @@ static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
 	}
 }
 
+static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
+{
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+
+	return vq;
+}
+
 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
 {
 	struct scatterlist hdr, status, *sgs[3];
@@ -416,7 +424,7 @@ static void virtio_queue_rqs(struct request **rqlist)
 	struct request *requeue_list = NULL;
 
 	rq_list_for_each_safe(rqlist, req, next) {
-		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
 		bool kick;
 
 		if (!virtblk_prep_rq_batch(req)) {
@@ -837,7 +845,7 @@ static void virtblk_complete_batch(struct io_comp_batch *iob)
 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct virtio_blk_vq *vq = hctx->driver_data;
+	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
 	struct virtblk_req *vbr;
 	unsigned long flags;
 	unsigned int len;
@@ -862,22 +870,10 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	return found;
 }
 
-static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			  unsigned int hctx_idx)
-{
-	struct virtio_blk *vblk = data;
-	struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
-
-	WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
-	hctx->driver_data = vq;
-	return 0;
-}
-
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.queue_rqs	= virtio_queue_rqs,
 	.commit_rqs	= virtio_commit_rqs,
-	.init_hctx	= virtblk_init_hctx,
 	.complete	= virtblk_request_done,
 	.map_queues	= virtblk_map_queues,
 	.poll		= virtblk_poll,
-- 
2.37.1



* Re: [PATCH] virtio-blk: Avoid use-after-free on suspend/resume
@ 2022-08-10 11:45 ` Shigeru Yoshida
From: Shigeru Yoshida @ 2022-08-10 11:45 UTC
  To: mst, jasowang, axboe
  Cc: pbonzini, stefanha, virtualization, linux-block, linux-kernel,
	shigeru.yoshida

ping?

On Sun, 31 Jul 2022 16:09:51 +0900, Shigeru Yoshida wrote:
> hctx->driver_data is set to vq in virtblk_init_hctx().  However, the
> vqs are freed on suspend and reallocated on resume, so after resume
> hctx->driver_data points to freed memory.  The resulting use-after-free
> access crashes the kernel with a trace like the one below:
> 
> [...]
> 
> Fix this by looking the vq up through vblk (vblk->vqs[hctx->queue_num])
> at each use instead of caching it; this makes virtblk_init_hctx()
> unnecessary, so remove it as well.
> 
> Signed-off-by: Shigeru Yoshida <shigeru.yoshida@gmail.com>
> [...]


* Re: [PATCH] virtio-blk: Avoid use-after-free on suspend/resume
@ 2022-08-10 12:41   ` Michael S. Tsirkin
From: Michael S. Tsirkin @ 2022-08-10 12:41 UTC
  To: Shigeru Yoshida
  Cc: jasowang, axboe, pbonzini, stefanha, virtualization, linux-block,
	linux-kernel

On Wed, Aug 10, 2022 at 08:45:56PM +0900, Shigeru Yoshida wrote:
> ping?
> 
> On Sun, 31 Jul 2022 16:09:51 +0900, Shigeru Yoshida wrote:
> > hctx->driver_data is set to vq in virtblk_init_hctx().  However, the
> > vqs are freed on suspend and reallocated on resume, so after resume
> > hctx->driver_data points to freed memory.  The resulting use-after-free
> > access crashes the kernel with a trace like the one below:
> > 
> > [...]
> > 
> > Fix this by looking the vq up through vblk (vblk->vqs[hctx->queue_num])
> > at each use instead of caching it; this makes virtblk_init_hctx()
> > unnecessary, so remove it as well.
> > 
> > Signed-off-by: Shigeru Yoshida <shigeru.yoshida@gmail.com>

Fixes: 4e0400525691 ("virtio-blk: support polling I/O")
Cc: "Suwan Kim" <suwan.kim027@gmail.com>

I assume?

> > [...]



* Re: [PATCH] virtio-blk: Avoid use-after-free on suspend/resume
@ 2022-08-10 12:55     ` Shigeru Yoshida
From: Shigeru Yoshida @ 2022-08-10 12:55 UTC
  To: mst
  Cc: jasowang, axboe, pbonzini, stefanha, virtualization, linux-block,
	linux-kernel


On Wed, 10 Aug 2022 08:41:04 -0400, Michael S. Tsirkin wrote:
> On Wed, Aug 10, 2022 at 08:45:56PM +0900, Shigeru Yoshida wrote:
>> ping?
>> 
>> On Sun, 31 Jul 2022 16:09:51 +0900, Shigeru Yoshida wrote:
>> > [...]
> 
> Fixes: 4e0400525691 ("virtio-blk: support polling I/O")
> Cc: "Suwan Kim" <suwan.kim027@gmail.com>
> 
> I assume?

Yes, this patch fixes the commit you mentioned.

Thanks,
Shigeru


