linux-nvme.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
From: swise@opengridcomputing.com (Steve Wise)
Subject: [PATCH] nvme-rdma: Always signal fabrics private commands
Date: Wed, 29 Jun 2016 09:57:16 -0500	[thread overview]
Message-ID: <010501d1d216$86c0f0c0$9442d240$@opengridcomputing.com> (raw)
In-Reply-To: <005201d1d148$32c33740$9849a5c0$@opengridcomputing.com>

> > On Sun, Jun 26, 2016 at 07:41:39PM +0300, Sagi Grimberg wrote:
> > > Our error path is freeing the tagset before we free the queue (draining
> > > the qp) so we get to a use-after-free condition (->done() is a freed
> > > tag memory).
> > >
> > > Note that we must allocate the qp before we allocate the tagset because
> > > we need the device when init_request callouts come. So we allocated
> > > before, we free after. An alternative fix was to free the queue before
> > > the tagset even though we allocated it before (as Steve suggested).
> >
> > Would draining, but not freeing the qp before freeing the tagset work?
> > That seems like the most sensible option here.
> 
> disconnecting and draining, I think.

How about this?

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e1205c0..d3a0396 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -604,23 +604,32 @@ out_destroy_cm_id:
        return ret;
 }

-static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-               return;
-
        rdma_disconnect(queue->cm_id);
        ib_drain_qp(queue->qp);
+}
+
+static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+{
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
 }

+static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
+{
+       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+               return;
+       nvme_rdma_stop_queue(queue);
+       nvme_rdma_free_queue(queue);
+}
+
 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
-               nvme_rdma_free_queue(&ctrl->queues[i]);
+               nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
 }

 static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
@@ -653,7 +662,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)

 out_free_queues:
        for (; i >= 1; i--)
-               nvme_rdma_free_queue(&ctrl->queues[i]);
+               nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);

        return ret;
 }
@@ -662,7 +671,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
 {
        nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
-       nvme_rdma_free_queue(&ctrl->queues[0]);
+       nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
        nvme_rdma_dev_put(ctrl->device);
@@ -705,7 +714,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
                        goto requeue;
        }

-       nvme_rdma_free_queue(&ctrl->queues[0]);
+       nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);

        ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
        if (ret)
@@ -1617,6 +1626,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_free_tagset:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_put_dev:
        nvme_rdma_dev_put(ctrl->device);

  reply	other threads:[~2016-06-29 14:57 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-06-23 16:08 [PATCH] nvme-rdma: Always signal fabrics private commands Sagi Grimberg
2016-06-23 18:17 ` Steve Wise
2016-06-24  7:07 ` Christoph Hellwig
2016-06-24 14:05   ` Steve Wise
2016-06-26 16:41   ` Sagi Grimberg
2016-06-28  8:41     ` Christoph Hellwig
2016-06-28 14:20       ` Steve Wise
2016-06-29 14:57         ` Steve Wise [this message]
2016-06-30  6:36           ` 'Christoph Hellwig'
2016-06-30 13:44             ` Steve Wise
2016-06-30 15:10               ` Steve Wise
2016-07-13 10:08               ` Sagi Grimberg
2016-07-13 10:11                 ` Sagi Grimberg
2016-07-13 14:28                   ` Steve Wise
2016-07-13 14:47                     ` Sagi Grimberg
2016-07-13 14:51                       ` Steve Wise
2016-07-13 15:02                         ` Sagi Grimberg
2016-07-13 15:12                           ` Steve Wise

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='010501d1d216$86c0f0c0$9442d240$@opengridcomputing.com' \
    --to=swise@opengridcomputing.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).