public inbox for linux-nvme@lists.infradead.org
 help / color / mirror / Atom feed
From: Maurizio Lombardi <mlombard@redhat.com>
To: kbusch@kernel.org
Cc: mheyne@amazon.de, emilne@redhat.com, jmeneghi@redhat.com,
	linux-nvme@lists.infradead.org, dwagner@suse.de,
	mlombard@arkamax.eu, mkhalfella@purestorage.com,
	chaitanyak@nvidia.com, hare@kernel.org, hch@lst.de
Subject: [PATCH V3 7/8] nvmet-loop: do not alloc admin tag set during reset
Date: Fri, 10 Apr 2026 09:39:23 +0200	[thread overview]
Message-ID: <20260410073924.61078-8-mlombard@redhat.com> (raw)
In-Reply-To: <20260410073924.61078-1-mlombard@redhat.com>

Currently, resetting a loopback controller unconditionally invokes
nvme_alloc_admin_tag_set() inside nvme_loop_configure_admin_queue().
Doing so drops the old queue and allocates a new one. Consequently,
this reverts the admin queue's timeout (q->rq_timeout) back to the
module default (NVME_ADMIN_TIMEOUT), wiping out any custom timeout
values the user may have configured via sysfs. It can also race
against the sysfs nvme_admin_timeout_store() function, which may
dereference the admin_q pointer during the RESETTING state.

Add a 'new' boolean parameter to nvme_loop_configure_admin_queue() to
distinguish between an initial probe and a reset. Only allocate the
admin tag set during the initial controller creation and do not
remove it when resetting the controller.
This ensures the existing queues and their sysfs-configured timeouts
remain intact across controller resets.

Signed-off-by: Maurizio Lombardi <mlombard@redhat.com>
---
 drivers/nvme/target/loop.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d98d0cdc5d6f..c9cf0ddc1b85 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -261,7 +261,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 	.init_hctx	= nvme_loop_init_admin_hctx,
 };
 
-static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl, bool remove)
 {
 	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
 		return;
@@ -274,7 +274,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	nvmet_cq_put(&ctrl->queues[0].nvme_cq);
-	nvme_remove_admin_tag_set(&ctrl->ctrl);
+	if (remove)
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -361,7 +362,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
 	return 0;
 }
 
-static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl, bool new)
 {
 	int error;
 
@@ -375,12 +376,15 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	}
 	ctrl->ctrl.queue_count = 1;
 
-	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
-			&nvme_loop_admin_mq_ops,
-			sizeof(struct nvme_loop_iod) +
-			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
-	if (error)
-		goto out_free_sq;
+	if (new) {
+		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+				&ctrl->admin_tag_set,
+				&nvme_loop_admin_mq_ops,
+				sizeof(struct nvme_loop_iod) +
+				NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+		if (error)
+			goto out_free_sq;
+	}
 
 	/* reset stopped state for the fresh admin queue */
 	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
@@ -415,7 +419,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	return error;
 }
 
-static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
+static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl, bool remove)
 {
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_quiesce_io_queues(&ctrl->ctrl);
@@ -426,12 +430,12 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 	if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
 		nvme_disable_ctrl(&ctrl->ctrl, true);
 
-	nvme_loop_destroy_admin_queue(ctrl);
+	nvme_loop_destroy_admin_queue(ctrl, remove);
 }
 
 static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
 {
-	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
+	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl), true);
 }
 
 static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
@@ -453,7 +457,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	int ret;
 
 	nvme_stop_ctrl(&ctrl->ctrl);
-	nvme_loop_shutdown_ctrl(ctrl);
+	nvme_loop_shutdown_ctrl(ctrl, false);
 
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
@@ -465,7 +469,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 		return;
 	}
 
-	ret = nvme_loop_configure_admin_queue(ctrl);
+	ret = nvme_loop_configure_admin_queue(ctrl, false);
 	if (ret)
 		goto out_disable;
 
@@ -492,7 +496,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 out_destroy_admin:
 	nvme_quiesce_admin_queue(&ctrl->ctrl);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	nvme_loop_destroy_admin_queue(ctrl);
+	nvme_loop_destroy_admin_queue(ctrl, true);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
 	nvme_uninit_ctrl(&ctrl->ctrl);
@@ -594,7 +598,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 	if (!ctrl->queues)
 		goto out_uninit_ctrl;
 
-	ret = nvme_loop_configure_admin_queue(ctrl);
+	ret = nvme_loop_configure_admin_queue(ctrl, true);
 	if (ret)
 		goto out_free_queues;
 
@@ -632,7 +636,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 out_remove_admin_queue:
 	nvme_quiesce_admin_queue(&ctrl->ctrl);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	nvme_loop_destroy_admin_queue(ctrl);
+	nvme_loop_destroy_admin_queue(ctrl, true);
 out_free_queues:
 	kfree(ctrl->queues);
 out_uninit_ctrl:
-- 
2.53.0



  parent reply	other threads:[~2026-04-10  7:40 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-10  7:39 [PATCH V3 0/8] nvme: Refactor and expose per-controller timeout configuration Maurizio Lombardi
2026-04-10  7:39 ` [PATCH V3 1/8] nvme: Let the blocklayer set timeouts for requests Maurizio Lombardi
2026-04-10  7:39 ` [PATCH V3 2/8] nvme: add sysfs attribute to change admin timeout per nvme controller Maurizio Lombardi
2026-04-10  7:39 ` [PATCH V3 3/8] nvme: pci: use admin queue timeout over NVME_ADMIN_TIMEOUT Maurizio Lombardi
2026-04-10  7:39 ` [PATCH V3 4/8] nvme: add sysfs attribute to change IO timeout per nvme controller Maurizio Lombardi
2026-04-10  7:39 ` [PATCH V3 5/8] nvme: use per controller timeout waits over depending on global default Maurizio Lombardi
2026-04-10  7:39 ` [PATCH V3 6/8] nvme-core: align fabrics_q teardown with admin_q in nvme_free_ctrl Maurizio Lombardi
2026-04-10  7:39 ` Maurizio Lombardi [this message]
2026-04-10  7:39 ` [PATCH V3 8/8] nvme-core: warn on allocating admin tag set with existing queue Maurizio Lombardi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260410073924.61078-8-mlombard@redhat.com \
    --to=mlombard@redhat.com \
    --cc=chaitanyak@nvidia.com \
    --cc=dwagner@suse.de \
    --cc=emilne@redhat.com \
    --cc=hare@kernel.org \
    --cc=hch@lst.de \
    --cc=jmeneghi@redhat.com \
    --cc=kbusch@kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=mheyne@amazon.de \
    --cc=mkhalfella@purestorage.com \
    --cc=mlombard@arkamax.eu \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox