public inbox for linux-rdma@vger.kernel.org
 help / color / mirror / Atom feed
From: Yi Zhang <yizhan-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
To: Sagi Grimberg <sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>,
	linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r@public.gmane.org,
	linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Cc: hch-jcswGhMUV9g@public.gmane.org
Subject: Re: kernel NULL pointer observed on initiator side after 'nvmetcli clear' on target side
Date: Fri, 10 Mar 2017 15:59:17 +0800	[thread overview]
Message-ID: <b0a84bcc-7dca-d342-b30e-b01eba8088cd@redhat.com> (raw)
In-Reply-To: <6ffda302-02f9-12f0-a112-ea7cd20b9ffa-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>


>>
>
> Yep... looks like we don't take into account that we can't use all the
> queues now...
>
> Does this patch help:
I can still reproduce the "reconnecting in 10 seconds" issue with the patch 
applied; here is the log:

[  193.574183] nvme nvme0: new ctrl: NQN "nvme-subsystem-name", addr 
172.31.2.3:1023
[  193.612039] __nvme_rdma_init_request: changing called
[  193.638723] __nvme_rdma_init_request: changing called
[  193.661767] __nvme_rdma_init_request: changing called
[  193.684579] __nvme_rdma_init_request: changing called
[  193.707327] __nvme_rdma_init_request: changing called
[  193.730071] __nvme_rdma_init_request: changing called
[  193.752896] __nvme_rdma_init_request: changing called
[  193.775699] __nvme_rdma_init_request: changing called
[  193.798813] __nvme_rdma_init_request: changing called
[  193.821257] __nvme_rdma_init_request: changing called
[  193.844090] __nvme_rdma_init_request: changing called
[  193.866472] __nvme_rdma_init_request: changing called
[  193.889375] __nvme_rdma_init_request: changing called
[  193.912094] __nvme_rdma_init_request: changing called
[  193.934942] __nvme_rdma_init_request: changing called
[  193.957688] __nvme_rdma_init_request: changing called
[  606.273376] Broke affinity for irq 16
[  606.291940] Broke affinity for irq 28
[  606.310201] Broke affinity for irq 90
[  606.328211] Broke affinity for irq 93
[  606.346263] Broke affinity for irq 97
[  606.364314] Broke affinity for irq 100
[  606.382105] Broke affinity for irq 104
[  606.400727] smpboot: CPU 1 is now offline
[  616.820505] nvme nvme0: reconnecting in 10 seconds
[  626.882747] blk_mq_reinit_tagset: tag is null, continue
[  626.914000] nvme nvme0: Connect rejected: status 8 (invalid service ID).
[  626.947965] nvme nvme0: rdma_resolve_addr wait failed (-104).
[  626.974673] nvme nvme0: Failed reconnect attempt, requeueing...
[  637.100252] blk_mq_reinit_tagset: tag is null, continue
[  637.129200] nvme nvme0: Connect rejected: status 8 (invalid service ID).
[  637.163578] nvme nvme0: rdma_resolve_addr wait failed (-104).
[  637.190246] nvme nvme0: Failed reconnect attempt, requeueing...
[  647.340147] blk_mq_reinit_tagset: tag is null, continue
[  647.367612] nvme nvme0: Connect rejected: status 8 (invalid service ID).
[  647.402527] nvme nvme0: rdma_resolve_addr wait failed (-104).
[  647.430338] nvme nvme0: Failed reconnect attempt, requeueing...
[  657.579993] blk_mq_reinit_tagset: tag is null, continue
[  657.608478] nvme nvme0: Connect rejected: status 8 (invalid service ID).
[  657.643947] nvme nvme0: rdma_resolve_addr wait failed (-104).
[  657.670579] nvme nvme0: Failed reconnect attempt, requeueing...
[  667.819897] blk_mq_reinit_tagset: tag is null, continue
[  667.848786] nvme nvme0: Connect rejected: status 8 (invalid service ID).
[  667.881951] nvme nvme0: rdma_resolve_addr wait failed (-104).
[  667.908578] nvme nvme0: Failed reconnect attempt, requeueing...
[  678.059821] blk_mq_reinit_tagset: tag is null, continue
[  678.089295] nvme nvme0: Connect rejected: status 8 (invalid service ID).
[  678.123602] nvme nvme0: rdma_resolve_addr wait failed (-104).
[  678.150317] nvme nvme0: Failed reconnect attempt, requeueing...


> -- 
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index 29ac8fcb8d2c..25af3f75f6f1 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -337,8 +337,6 @@ static int __nvme_rdma_init_request(struct 
> nvme_rdma_ctrl *ctrl,
>         struct ib_device *ibdev = dev->dev;
>         int ret;
>
> -       BUG_ON(queue_idx >= ctrl->queue_count);
> -
>         ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct 
> nvme_command),
>                         DMA_TO_DEVICE);
>         if (ret)
> @@ -647,8 +645,22 @@ static int nvme_rdma_connect_io_queues(struct 
> nvme_rdma_ctrl *ctrl)
>
>  static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
>  {
> +       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
> +       unsigned int nr_io_queues;
>         int i, ret;
>
> +       nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
> +       ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
> +       if (ret)
> +               return ret;
> +
> +       ctrl->queue_count = nr_io_queues + 1;
> +       if (ctrl->queue_count < 2)
> +               return 0;
> +
> +       dev_info(ctrl->ctrl.device,
> +               "creating %d I/O queues.\n", nr_io_queues);
> +
>         for (i = 1; i < ctrl->queue_count; i++) {
>                 ret = nvme_rdma_init_queue(ctrl, i,
> ctrl->ctrl.opts->queue_size);
> @@ -1793,20 +1805,8 @@ static const struct nvme_ctrl_ops 
> nvme_rdma_ctrl_ops = {
>
>  static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
>  {
> -       struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
>         int ret;
>
> -       ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
> -       if (ret)
> -               return ret;
> -
> -       ctrl->queue_count = opts->nr_io_queues + 1;
> -       if (ctrl->queue_count < 2)
> -               return 0;
> -
> -       dev_info(ctrl->ctrl.device,
> -               "creating %d I/O queues.\n", opts->nr_io_queues);
> -
> -- 

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

  parent reply	other threads:[~2017-03-10  7:59 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <1832491330.31443919.1488709276951.JavaMail.zimbra@redhat.com>
     [not found] ` <1832491330.31443919.1488709276951.JavaMail.zimbra-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-03-05 13:39   ` kernel NULL pointer observed on initiator side after 'nvmetcli clear' on target side Yi Zhang
     [not found]     ` <1053522223.31446389.1488721184925.JavaMail.zimbra-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-03-06 11:25       ` Sagi Grimberg
     [not found]         ` <644fc4ab-df6b-a337-1431-bad881ef56ee-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-09  4:02           ` Yi Zhang
     [not found]             ` <88ae146a-7510-9be0-c9b4-58e70f9d73b9-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-03-09 11:23               ` Sagi Grimberg
     [not found]                 ` <6ffda302-02f9-12f0-a112-ea7cd20b9ffa-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-10  7:59                   ` Yi Zhang [this message]
     [not found]                     ` <b0a84bcc-7dca-d342-b30e-b01eba8088cd-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-03-13  8:09                       ` Sagi Grimberg
     [not found]                         ` <6fe6d285-3cb4-c88c-9a7c-741fce54120c-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-14 13:27                           ` Yi Zhang
     [not found]                             ` <7a955472-1975-1b73-c88c-367576a56884-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-03-16 16:40                               ` Sagi Grimberg
     [not found]                                 ` <bbb45ff5-8b61-2508-df4a-7c90eb6637de-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-18 12:06                                   ` Yi Zhang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b0a84bcc-7dca-d342-b30e-b01eba8088cd@redhat.com \
    --to=yizhan-h+wxahxf7alqt0dzr+alfa@public.gmane.org \
    --cc=hch-jcswGhMUV9g@public.gmane.org \
    --cc=linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r@public.gmane.org \
    --cc=linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox