public inbox for linux-arm-kernel@lists.infradead.org
 help / color / mirror / Atom feed
* [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
       [not found] <CGME20260320052101epcms2p42ae135da60b36685e9b7fca6849b57a6@epcms2p4>
@ 2026-03-20  5:21 ` 전민식
  2026-03-20 13:43   ` Kanchan Joshi
                     ` (4 more replies)
  0 siblings, 5 replies; 19+ messages in thread
From: 전민식 @ 2026-03-20  5:21 UTC (permalink / raw)
  To: kbusch@kernel.org, axboe@kernel.dk
  Cc: sven@kernel.org, j@jannau.net, neal@gompa.dev, hch@lst.de,
	sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 전민식

From e54937be5ca0a2b38fffe6611c360625dc0a8903 Mon Sep 17 00:00:00 2001
From: Minsik Jeon <hmi.jeon@samsung.com>
Date: Fri, 20 Mar 2026 14:48:07 +0900
Subject: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing

We were checking host_pathing_error before calling nvme_setup_cmd().
This caused the command setup to be skipped entirely when a pathing
error occurred, making it impossible to trace the nvme command via
trace_cmd nvme_complete_rq().

As a result, when nvme_complete_rq() logged a completion with cmdid=0,
it was impossible to correlate the completion with the nvme command
request.

This patch reorders the logic to first call nvme_setup_cmd(), then
perform the host_pathing_error check.

Co-authored-by: Beomsoo Kim <beomsooo.kim@samsung.com>
Co-authored-by: Eunsoo Lee <euns212.lee@samsung.com>
Co-authored-by: Steven Seungcheol Lee <sc108.lee@samsung.com>
Signed-off-by: Minsik Jeon <hmi.jeon@samsung.com>
---
 drivers/nvme/host/apple.c  | 6 +++---
 drivers/nvme/host/fc.c     | 8 ++++----
 drivers/nvme/host/pci.c    | 8 ++++----
 drivers/nvme/host/rdma.c   | 8 ++++----
 drivers/nvme/host/tcp.c    | 8 ++++----
 drivers/nvme/target/loop.c | 6 +++---
 6 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index ed61b97fde59..2a28c992d024 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -783,13 +783,13 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!READ_ONCE(q->enabled)))
 		return BLK_STS_IOERR;
 
-	if (!nvme_check_ready(&anv->ctrl, req, true))
-		return nvme_fail_nonready_command(&anv->ctrl, req);
-
 	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
 
+	if (!nvme_check_ready(&anv->ctrl, req, true))
+		return nvme_fail_nonready_command(&anv->ctrl, req);
+
 	if (blk_rq_nr_phys_segments(req)) {
 		ret = apple_nvme_map_data(anv, req, cmnd);
 		if (ret)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e1bb4707183c..8ea37102a836 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2762,14 +2762,14 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	u32 data_len;
 	blk_status_t ret;
 
-	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
-	    !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
-
 	ret = nvme_setup_cmd(ns, rq);
 	if (ret)
 		return ret;
 
+	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+	    !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
 	/*
 	 * nvme core doesn't quite treat the rq opaquely. Commands such
 	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b78ba239c8ea..ad0363f7e681 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1376,10 +1376,6 @@ static blk_status_t nvme_prep_rq(struct request *req)
 	iod->meta_total_len = 0;
 	iod->nr_dma_vecs = 0;
 
-	ret = nvme_setup_cmd(req->q->queuedata, req);
-	if (ret)
-		return ret;
-
 	if (blk_rq_nr_phys_segments(req)) {
 		ret = nvme_map_data(req);
 		if (ret)
@@ -1418,6 +1414,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
 		return BLK_STS_IOERR;
 
+	ret = nvme_setup_cmd(req->q->queuedata, req);
+	if (ret)
+		return ret;
+
 	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
 		return nvme_fail_nonready_command(&dev->ctrl, req);
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 57111139e84f..96248d81237e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2005,6 +2005,10 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	WARN_ON_ONCE(rq->tag < 0);
 
+	ret = nvme_setup_cmd(ns, rq);
+	if (ret)
+		return ret;
+
 	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
@@ -2020,10 +2024,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-	ret = nvme_setup_cmd(ns, rq);
-	if (ret)
-		goto unmap_qe;
-
 	nvme_start_request(rq);
 
 	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 243dab830dc8..1a3640e81b8f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2705,10 +2705,6 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
 	blk_status_t ret;
 
-	ret = nvme_setup_cmd(ns, rq);
-	if (ret)
-		return ret;
-
 	req->state = NVME_TCP_SEND_CMD_PDU;
 	req->status = cpu_to_le16(NVME_SC_SUCCESS);
 	req->offset = 0;
@@ -2768,6 +2764,10 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
+	ret = nvme_setup_cmd(ns, rq);
+	if (ret)
+		return ret;
+
 	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4b3f4f11928d..475b532d08e8 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -140,13 +140,13 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
-	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
-
 	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
 
+	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
+
 	nvme_start_request(req);
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 	iod->req.port = queue->ctrl->port;
-- 
2.52.0



^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
  2026-03-20  5:21 ` [PATCH] nvme: Move nvme_setup_cmd before hot_pathing 전민식
@ 2026-03-20 13:43   ` Kanchan Joshi
  2026-03-23  1:04     ` [PATCH v2] " 전민식
  2026-03-22  7:16   ` [PATCH] " kernel test robot
                     ` (3 subsequent siblings)
  4 siblings, 1 reply; 19+ messages in thread
From: Kanchan Joshi @ 2026-03-20 13:43 UTC (permalink / raw)
  To: hmi.jeon, kbusch@kernel.org, axboe@kernel.dk
  Cc: sven@kernel.org, j@jannau.net, neal@gompa.dev, hch@lst.de,
	sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org

On 3/20/2026 10:51 AM, 전민식 wrote:
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index b78ba239c8ea..ad0363f7e681 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -1376,10 +1376,6 @@ static blk_status_t nvme_prep_rq(struct request *req)
>   	iod->meta_total_len = 0;
>   	iod->nr_dma_vecs = 0;
>   
> -	ret = nvme_setup_cmd(req->q->queuedata, req);
> -	if (ret)
> -		return ret;
> -
>   	if (blk_rq_nr_phys_segments(req)) {
>   		ret = nvme_map_data(req);
>   		if (ret)
> @@ -1418,6 +1414,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
>   	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
>   		return BLK_STS_IOERR;
>   
> +	ret = nvme_setup_cmd(req->q->queuedata, req);
> +	if (ret)
> +		return ret;
> +
>   	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
>   		return nvme_fail_nonready_command(&dev->ctrl, req);

The intent is to improve the observability in case controller was not 
ready, but the patch may need rework so that it does not cause the 
memory leak for this scenario.

Currently nvme_prep_rq() calls nvme_setup_cmd(), and in case failure 
occurs after that, it also does explicit cleanup with 
nvme_cleanup_cmd(). For discard request, this cleanup involves freeing 
memory that was allocated.

This patch moves nvme_setup_cmd() to caller (i.e. nvme_queue_rq), checks 
for controller not being ready and asks block-layer to retry without 
ever freeing the resource.


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
  2026-03-20  5:21 ` [PATCH] nvme: Move nvme_setup_cmd before hot_pathing 전민식
  2026-03-20 13:43   ` Kanchan Joshi
@ 2026-03-22  7:16   ` kernel test robot
  2026-03-22  8:22   ` kernel test robot
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 19+ messages in thread
From: kernel test robot @ 2026-03-22  7:16 UTC (permalink / raw)
  To: 전민식, kbusch@kernel.org, axboe@kernel.dk
  Cc: oe-kbuild-all, sven@kernel.org, j@jannau.net, neal@gompa.dev,
	hch@lst.de, sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 전민식

Hi 전민식,

kernel test robot noticed the following build warnings:

[auto build test WARNING on axboe/for-next]
[also build test WARNING on linux-nvme/for-next linus/master v6.16-rc1 next-20260320]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/nvme-Move-nvme_setup_cmd-before-hot_pathing/20260322-080947
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git for-next
patch link:    https://lore.kernel.org/r/20260320052101epcms2p42ae135da60b36685e9b7fca6849b57a6%40epcms2p4
patch subject: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
config: x86_64-rhel-9.4 (https://download.01.org/0day-ci/archive/20260322/202603220813.hgb3EFi2-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260322/202603220813.hgb3EFi2-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603220813.hgb3EFi2-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/nvme/host/rdma.c: In function 'nvme_rdma_queue_rq':
>> drivers/nvme/host/rdma.c:2067:1: warning: label 'unmap_qe' defined but not used [-Wunused-label]
    2067 | unmap_qe:
         | ^~~~~~~~


vim +/unmap_qe +2067 drivers/nvme/host/rdma.c

71102307196028 Christoph Hellwig 2016-07-06  1991  
fc17b6534eb839 Christoph Hellwig 2017-06-03  1992  static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
71102307196028 Christoph Hellwig 2016-07-06  1993  		const struct blk_mq_queue_data *bd)
71102307196028 Christoph Hellwig 2016-07-06  1994  {
71102307196028 Christoph Hellwig 2016-07-06  1995  	struct nvme_ns *ns = hctx->queue->queuedata;
71102307196028 Christoph Hellwig 2016-07-06  1996  	struct nvme_rdma_queue *queue = hctx->driver_data;
71102307196028 Christoph Hellwig 2016-07-06  1997  	struct request *rq = bd->rq;
71102307196028 Christoph Hellwig 2016-07-06  1998  	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307196028 Christoph Hellwig 2016-07-06  1999  	struct nvme_rdma_qe *sqe = &req->sqe;
f4b9e6c90c5725 Keith Busch       2021-03-17  2000  	struct nvme_command *c = nvme_req(rq)->cmd;
71102307196028 Christoph Hellwig 2016-07-06  2001  	struct ib_device *dev;
3bc32bb1186cca Christoph Hellwig 2018-06-11  2002  	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
fc17b6534eb839 Christoph Hellwig 2017-06-03  2003  	blk_status_t ret;
fc17b6534eb839 Christoph Hellwig 2017-06-03  2004  	int err;
71102307196028 Christoph Hellwig 2016-07-06  2005  
71102307196028 Christoph Hellwig 2016-07-06  2006  	WARN_ON_ONCE(rq->tag < 0);
71102307196028 Christoph Hellwig 2016-07-06  2007  
ae1c609d185960 전민식            2026-03-20  2008  	ret = nvme_setup_cmd(ns, rq);
ae1c609d185960 전민식            2026-03-20  2009  	if (ret)
ae1c609d185960 전민식            2026-03-20  2010  		return ret;
ae1c609d185960 전민식            2026-03-20  2011  
a97157440e1e69 Andy Chiu         2021-04-26  2012  	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
a97157440e1e69 Andy Chiu         2021-04-26  2013  		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
553cd9ef82edd8 Christoph Hellwig 2016-11-02  2014  
71102307196028 Christoph Hellwig 2016-07-06  2015  	dev = queue->device->dev;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2016  
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2017  	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2018  					 sizeof(struct nvme_command),
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2019  					 DMA_TO_DEVICE);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2020  	err = ib_dma_mapping_error(dev, req->sqe.dma);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2021  	if (unlikely(err))
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2022  		return BLK_STS_RESOURCE;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2023  
71102307196028 Christoph Hellwig 2016-07-06  2024  	ib_dma_sync_single_for_cpu(dev, sqe->dma,
71102307196028 Christoph Hellwig 2016-07-06  2025  			sizeof(struct nvme_command), DMA_TO_DEVICE);
71102307196028 Christoph Hellwig 2016-07-06  2026  
6887fc6495f2df Sagi Grimberg     2022-10-03  2027  	nvme_start_request(rq);
71102307196028 Christoph Hellwig 2016-07-06  2028  
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2029  	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2030  	    queue->pi_support &&
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2031  	    (c->common.opcode == nvme_cmd_write ||
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2032  	     c->common.opcode == nvme_cmd_read) &&
0372dd4e361717 Daniel Wagner     2023-12-18  2033  	    nvme_ns_has_pi(ns->head))
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2034  		req->use_sig_mr = true;
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2035  	else
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2036  		req->use_sig_mr = false;
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2037  
fc17b6534eb839 Christoph Hellwig 2017-06-03  2038  	err = nvme_rdma_map_data(queue, rq, c);
a7b7c7a105a528 Max Gurtovoy      2017-08-14  2039  	if (unlikely(err < 0)) {
71102307196028 Christoph Hellwig 2016-07-06  2040  		dev_err(queue->ctrl->ctrl.device,
fc17b6534eb839 Christoph Hellwig 2017-06-03  2041  			     "Failed to map data (%d)\n", err);
71102307196028 Christoph Hellwig 2016-07-06  2042  		goto err;
71102307196028 Christoph Hellwig 2016-07-06  2043  	}
71102307196028 Christoph Hellwig 2016-07-06  2044  
b4b591c87f2b0f Sagi Grimberg     2017-11-23  2045  	sqe->cqe.done = nvme_rdma_send_done;
b4b591c87f2b0f Sagi Grimberg     2017-11-23  2046  
71102307196028 Christoph Hellwig 2016-07-06  2047  	ib_dma_sync_single_for_device(dev, sqe->dma,
71102307196028 Christoph Hellwig 2016-07-06  2048  			sizeof(struct nvme_command), DMA_TO_DEVICE);
71102307196028 Christoph Hellwig 2016-07-06  2049  
fc17b6534eb839 Christoph Hellwig 2017-06-03  2050  	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
f41725bbe16b07 Israel Rukshin    2017-11-26  2051  			req->mr ? &req->reg_wr.wr : NULL);
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2052  	if (unlikely(err))
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2053  		goto err_unmap;
71102307196028 Christoph Hellwig 2016-07-06  2054  
fc17b6534eb839 Christoph Hellwig 2017-06-03  2055  	return BLK_STS_OK;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2056  
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2057  err_unmap:
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2058  	nvme_rdma_unmap_data(queue, rq);
71102307196028 Christoph Hellwig 2016-07-06  2059  err:
62eca39722fd99 Chao Leng         2021-02-01  2060  	if (err == -EIO)
62eca39722fd99 Chao Leng         2021-02-01  2061  		ret = nvme_host_path_error(rq);
62eca39722fd99 Chao Leng         2021-02-01  2062  	else if (err == -ENOMEM || err == -EAGAIN)
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2063  		ret = BLK_STS_RESOURCE;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2064  	else
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2065  		ret = BLK_STS_IOERR;
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2066  	nvme_cleanup_cmd(rq);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06 @2067  unmap_qe:
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2068  	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2069  			    DMA_TO_DEVICE);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2070  	return ret;
71102307196028 Christoph Hellwig 2016-07-06  2071  }
71102307196028 Christoph Hellwig 2016-07-06  2072  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
  2026-03-20  5:21 ` [PATCH] nvme: Move nvme_setup_cmd before hot_pathing 전민식
  2026-03-20 13:43   ` Kanchan Joshi
  2026-03-22  7:16   ` [PATCH] " kernel test robot
@ 2026-03-22  8:22   ` kernel test robot
  2026-03-22 12:09   ` kernel test robot
  2026-03-24 23:55   ` Justin Tee
  4 siblings, 0 replies; 19+ messages in thread
From: kernel test robot @ 2026-03-22  8:22 UTC (permalink / raw)
  To: 전민식, kbusch@kernel.org, axboe@kernel.dk
  Cc: llvm, oe-kbuild-all, sven@kernel.org, j@jannau.net,
	neal@gompa.dev, hch@lst.de, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 전민식

Hi 전민식,

kernel test robot noticed the following build warnings:

[auto build test WARNING on axboe/for-next]
[also build test WARNING on linus/master v7.0-rc4 next-20260320]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/nvme-Move-nvme_setup_cmd-before-hot_pathing/20260322-080947
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git for-next
patch link:    https://lore.kernel.org/r/20260320052101epcms2p42ae135da60b36685e9b7fca6849b57a6%40epcms2p4
patch subject: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
config: i386-randconfig-003-20260322 (https://download.01.org/0day-ci/archive/20260322/202603221611.fblmNLio-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260322/202603221611.fblmNLio-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603221611.fblmNLio-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/nvme/host/rdma.c:2067:1: warning: unused label 'unmap_qe' [-Wunused-label]
    2067 | unmap_qe:
         | ^~~~~~~~~
   1 warning generated.


vim +/unmap_qe +2067 drivers/nvme/host/rdma.c

711023071960285 Christoph Hellwig 2016-07-06  1991  
fc17b6534eb8395 Christoph Hellwig 2017-06-03  1992  static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
711023071960285 Christoph Hellwig 2016-07-06  1993  		const struct blk_mq_queue_data *bd)
711023071960285 Christoph Hellwig 2016-07-06  1994  {
711023071960285 Christoph Hellwig 2016-07-06  1995  	struct nvme_ns *ns = hctx->queue->queuedata;
711023071960285 Christoph Hellwig 2016-07-06  1996  	struct nvme_rdma_queue *queue = hctx->driver_data;
711023071960285 Christoph Hellwig 2016-07-06  1997  	struct request *rq = bd->rq;
711023071960285 Christoph Hellwig 2016-07-06  1998  	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
711023071960285 Christoph Hellwig 2016-07-06  1999  	struct nvme_rdma_qe *sqe = &req->sqe;
f4b9e6c90c57251 Keith Busch       2021-03-17  2000  	struct nvme_command *c = nvme_req(rq)->cmd;
711023071960285 Christoph Hellwig 2016-07-06  2001  	struct ib_device *dev;
3bc32bb1186ccaf Christoph Hellwig 2018-06-11  2002  	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
fc17b6534eb8395 Christoph Hellwig 2017-06-03  2003  	blk_status_t ret;
fc17b6534eb8395 Christoph Hellwig 2017-06-03  2004  	int err;
711023071960285 Christoph Hellwig 2016-07-06  2005  
711023071960285 Christoph Hellwig 2016-07-06  2006  	WARN_ON_ONCE(rq->tag < 0);
711023071960285 Christoph Hellwig 2016-07-06  2007  
ae1c609d185960c 전민식            2026-03-20  2008  	ret = nvme_setup_cmd(ns, rq);
ae1c609d185960c 전민식            2026-03-20  2009  	if (ret)
ae1c609d185960c 전민식            2026-03-20  2010  		return ret;
ae1c609d185960c 전민식            2026-03-20  2011  
a97157440e1e69c Andy Chiu         2021-04-26  2012  	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
a97157440e1e69c Andy Chiu         2021-04-26  2013  		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
553cd9ef82edd81 Christoph Hellwig 2016-11-02  2014  
711023071960285 Christoph Hellwig 2016-07-06  2015  	dev = queue->device->dev;
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2016  
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2017  	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2018  					 sizeof(struct nvme_command),
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2019  					 DMA_TO_DEVICE);
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2020  	err = ib_dma_mapping_error(dev, req->sqe.dma);
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2021  	if (unlikely(err))
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2022  		return BLK_STS_RESOURCE;
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2023  
711023071960285 Christoph Hellwig 2016-07-06  2024  	ib_dma_sync_single_for_cpu(dev, sqe->dma,
711023071960285 Christoph Hellwig 2016-07-06  2025  			sizeof(struct nvme_command), DMA_TO_DEVICE);
711023071960285 Christoph Hellwig 2016-07-06  2026  
6887fc6495f2dfd Sagi Grimberg     2022-10-03  2027  	nvme_start_request(rq);
711023071960285 Christoph Hellwig 2016-07-06  2028  
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2029  	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2030  	    queue->pi_support &&
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2031  	    (c->common.opcode == nvme_cmd_write ||
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2032  	     c->common.opcode == nvme_cmd_read) &&
0372dd4e3617170 Daniel Wagner     2023-12-18  2033  	    nvme_ns_has_pi(ns->head))
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2034  		req->use_sig_mr = true;
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2035  	else
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2036  		req->use_sig_mr = false;
5ec5d3bddc6b912 Max Gurtovoy      2020-05-19  2037  
fc17b6534eb8395 Christoph Hellwig 2017-06-03  2038  	err = nvme_rdma_map_data(queue, rq, c);
a7b7c7a105a528e Max Gurtovoy      2017-08-14  2039  	if (unlikely(err < 0)) {
711023071960285 Christoph Hellwig 2016-07-06  2040  		dev_err(queue->ctrl->ctrl.device,
fc17b6534eb8395 Christoph Hellwig 2017-06-03  2041  			     "Failed to map data (%d)\n", err);
711023071960285 Christoph Hellwig 2016-07-06  2042  		goto err;
711023071960285 Christoph Hellwig 2016-07-06  2043  	}
711023071960285 Christoph Hellwig 2016-07-06  2044  
b4b591c87f2b0f4 Sagi Grimberg     2017-11-23  2045  	sqe->cqe.done = nvme_rdma_send_done;
b4b591c87f2b0f4 Sagi Grimberg     2017-11-23  2046  
711023071960285 Christoph Hellwig 2016-07-06  2047  	ib_dma_sync_single_for_device(dev, sqe->dma,
711023071960285 Christoph Hellwig 2016-07-06  2048  			sizeof(struct nvme_command), DMA_TO_DEVICE);
711023071960285 Christoph Hellwig 2016-07-06  2049  
fc17b6534eb8395 Christoph Hellwig 2017-06-03  2050  	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
f41725bbe16b077 Israel Rukshin    2017-11-26  2051  			req->mr ? &req->reg_wr.wr : NULL);
16686f3a6c3cd63 Max Gurtovoy      2019-10-13  2052  	if (unlikely(err))
16686f3a6c3cd63 Max Gurtovoy      2019-10-13  2053  		goto err_unmap;
711023071960285 Christoph Hellwig 2016-07-06  2054  
fc17b6534eb8395 Christoph Hellwig 2017-06-03  2055  	return BLK_STS_OK;
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2056  
16686f3a6c3cd63 Max Gurtovoy      2019-10-13  2057  err_unmap:
16686f3a6c3cd63 Max Gurtovoy      2019-10-13  2058  	nvme_rdma_unmap_data(queue, rq);
711023071960285 Christoph Hellwig 2016-07-06  2059  err:
62eca39722fd997 Chao Leng         2021-02-01  2060  	if (err == -EIO)
62eca39722fd997 Chao Leng         2021-02-01  2061  		ret = nvme_host_path_error(rq);
62eca39722fd997 Chao Leng         2021-02-01  2062  	else if (err == -ENOMEM || err == -EAGAIN)
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2063  		ret = BLK_STS_RESOURCE;
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2064  	else
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2065  		ret = BLK_STS_IOERR;
16686f3a6c3cd63 Max Gurtovoy      2019-10-13  2066  	nvme_cleanup_cmd(rq);
62f99b62e5e3b88 Max Gurtovoy      2019-06-06 @2067  unmap_qe:
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2068  	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2069  			    DMA_TO_DEVICE);
62f99b62e5e3b88 Max Gurtovoy      2019-06-06  2070  	return ret;
711023071960285 Christoph Hellwig 2016-07-06  2071  }
711023071960285 Christoph Hellwig 2016-07-06  2072  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
  2026-03-20  5:21 ` [PATCH] nvme: Move nvme_setup_cmd before hot_pathing 전민식
                     ` (2 preceding siblings ...)
  2026-03-22  8:22   ` kernel test robot
@ 2026-03-22 12:09   ` kernel test robot
  2026-03-24 23:55   ` Justin Tee
  4 siblings, 0 replies; 19+ messages in thread
From: kernel test robot @ 2026-03-22 12:09 UTC (permalink / raw)
  To: 전민식, kbusch@kernel.org, axboe@kernel.dk
  Cc: oe-kbuild-all, sven@kernel.org, j@jannau.net, neal@gompa.dev,
	hch@lst.de, sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 전민식

Hi 전민식,

kernel test robot noticed the following build warnings:

[auto build test WARNING on axboe/for-next]
[also build test WARNING on linus/master v7.0-rc4 next-20260320]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/nvme-Move-nvme_setup_cmd-before-hot_pathing/20260322-080947
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git for-next
patch link:    https://lore.kernel.org/r/20260320052101epcms2p42ae135da60b36685e9b7fca6849b57a6%40epcms2p4
patch subject: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
config: x86_64-rhel-9.4-kunit (https://download.01.org/0day-ci/archive/20260322/202603222033.ACImXpPE-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260322/202603222033.ACImXpPE-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603222033.ACImXpPE-lkp@intel.com/

All warnings (new ones prefixed by >>):

   drivers/nvme/host/rdma.c: In function 'nvme_rdma_queue_rq':
>> drivers/nvme/host/rdma.c:2067:1: warning: label 'unmap_qe' defined but not used [-Wunused-label]
    2067 | unmap_qe:
         | ^~~~~~~~


vim +/unmap_qe +2067 drivers/nvme/host/rdma.c

71102307196028 Christoph Hellwig 2016-07-06  1991  
fc17b6534eb839 Christoph Hellwig 2017-06-03  1992  static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
71102307196028 Christoph Hellwig 2016-07-06  1993  		const struct blk_mq_queue_data *bd)
71102307196028 Christoph Hellwig 2016-07-06  1994  {
71102307196028 Christoph Hellwig 2016-07-06  1995  	struct nvme_ns *ns = hctx->queue->queuedata;
71102307196028 Christoph Hellwig 2016-07-06  1996  	struct nvme_rdma_queue *queue = hctx->driver_data;
71102307196028 Christoph Hellwig 2016-07-06  1997  	struct request *rq = bd->rq;
71102307196028 Christoph Hellwig 2016-07-06  1998  	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307196028 Christoph Hellwig 2016-07-06  1999  	struct nvme_rdma_qe *sqe = &req->sqe;
f4b9e6c90c5725 Keith Busch       2021-03-17  2000  	struct nvme_command *c = nvme_req(rq)->cmd;
71102307196028 Christoph Hellwig 2016-07-06  2001  	struct ib_device *dev;
3bc32bb1186cca Christoph Hellwig 2018-06-11  2002  	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
fc17b6534eb839 Christoph Hellwig 2017-06-03  2003  	blk_status_t ret;
fc17b6534eb839 Christoph Hellwig 2017-06-03  2004  	int err;
71102307196028 Christoph Hellwig 2016-07-06  2005  
71102307196028 Christoph Hellwig 2016-07-06  2006  	WARN_ON_ONCE(rq->tag < 0);
71102307196028 Christoph Hellwig 2016-07-06  2007  
ae1c609d185960 전민식            2026-03-20  2008  	ret = nvme_setup_cmd(ns, rq);
ae1c609d185960 전민식            2026-03-20  2009  	if (ret)
ae1c609d185960 전민식            2026-03-20  2010  		return ret;
ae1c609d185960 전민식            2026-03-20  2011  
a97157440e1e69 Andy Chiu         2021-04-26  2012  	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
a97157440e1e69 Andy Chiu         2021-04-26  2013  		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
553cd9ef82edd8 Christoph Hellwig 2016-11-02  2014  
71102307196028 Christoph Hellwig 2016-07-06  2015  	dev = queue->device->dev;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2016  
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2017  	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2018  					 sizeof(struct nvme_command),
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2019  					 DMA_TO_DEVICE);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2020  	err = ib_dma_mapping_error(dev, req->sqe.dma);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2021  	if (unlikely(err))
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2022  		return BLK_STS_RESOURCE;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2023  
71102307196028 Christoph Hellwig 2016-07-06  2024  	ib_dma_sync_single_for_cpu(dev, sqe->dma,
71102307196028 Christoph Hellwig 2016-07-06  2025  			sizeof(struct nvme_command), DMA_TO_DEVICE);
71102307196028 Christoph Hellwig 2016-07-06  2026  
6887fc6495f2df Sagi Grimberg     2022-10-03  2027  	nvme_start_request(rq);
71102307196028 Christoph Hellwig 2016-07-06  2028  
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2029  	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2030  	    queue->pi_support &&
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2031  	    (c->common.opcode == nvme_cmd_write ||
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2032  	     c->common.opcode == nvme_cmd_read) &&
0372dd4e361717 Daniel Wagner     2023-12-18  2033  	    nvme_ns_has_pi(ns->head))
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2034  		req->use_sig_mr = true;
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2035  	else
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2036  		req->use_sig_mr = false;
5ec5d3bddc6b91 Max Gurtovoy      2020-05-19  2037  
fc17b6534eb839 Christoph Hellwig 2017-06-03  2038  	err = nvme_rdma_map_data(queue, rq, c);
a7b7c7a105a528 Max Gurtovoy      2017-08-14  2039  	if (unlikely(err < 0)) {
71102307196028 Christoph Hellwig 2016-07-06  2040  		dev_err(queue->ctrl->ctrl.device,
fc17b6534eb839 Christoph Hellwig 2017-06-03  2041  			     "Failed to map data (%d)\n", err);
71102307196028 Christoph Hellwig 2016-07-06  2042  		goto err;
71102307196028 Christoph Hellwig 2016-07-06  2043  	}
71102307196028 Christoph Hellwig 2016-07-06  2044  
b4b591c87f2b0f Sagi Grimberg     2017-11-23  2045  	sqe->cqe.done = nvme_rdma_send_done;
b4b591c87f2b0f Sagi Grimberg     2017-11-23  2046  
71102307196028 Christoph Hellwig 2016-07-06  2047  	ib_dma_sync_single_for_device(dev, sqe->dma,
71102307196028 Christoph Hellwig 2016-07-06  2048  			sizeof(struct nvme_command), DMA_TO_DEVICE);
71102307196028 Christoph Hellwig 2016-07-06  2049  
fc17b6534eb839 Christoph Hellwig 2017-06-03  2050  	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
f41725bbe16b07 Israel Rukshin    2017-11-26  2051  			req->mr ? &req->reg_wr.wr : NULL);
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2052  	if (unlikely(err))
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2053  		goto err_unmap;
71102307196028 Christoph Hellwig 2016-07-06  2054  
fc17b6534eb839 Christoph Hellwig 2017-06-03  2055  	return BLK_STS_OK;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2056  
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2057  err_unmap:
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2058  	nvme_rdma_unmap_data(queue, rq);
71102307196028 Christoph Hellwig 2016-07-06  2059  err:
62eca39722fd99 Chao Leng         2021-02-01  2060  	if (err == -EIO)
62eca39722fd99 Chao Leng         2021-02-01  2061  		ret = nvme_host_path_error(rq);
62eca39722fd99 Chao Leng         2021-02-01  2062  	else if (err == -ENOMEM || err == -EAGAIN)
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2063  		ret = BLK_STS_RESOURCE;
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2064  	else
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2065  		ret = BLK_STS_IOERR;
16686f3a6c3cd6 Max Gurtovoy      2019-10-13  2066  	nvme_cleanup_cmd(rq);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06 @2067  unmap_qe:
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2068  	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2069  			    DMA_TO_DEVICE);
62f99b62e5e3b8 Max Gurtovoy      2019-06-06  2070  	return ret;
71102307196028 Christoph Hellwig 2016-07-06  2071  }
71102307196028 Christoph Hellwig 2016-07-06  2072  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v2] nvme: Move nvme_setup_cmd before hot_pathing
  2026-03-20 13:43   ` Kanchan Joshi
@ 2026-03-23  1:04     ` 전민식
  0 siblings, 0 replies; 19+ messages in thread
From: 전민식 @ 2026-03-23  1:04 UTC (permalink / raw)
  To: 칸찬, kbusch@kernel.org, axboe@kernel.dk
  Cc: sven@kernel.org, j@jannau.net, neal@gompa.dev, hch@lst.de,
	sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 전민식

Hi Kanchan,


The reason for calling nvme_cleanup_cmd in the nvme_prep_rq function 
is when it fails after calling the nvme_map_data function.
Therefore, the memory leak does not occur. Did I miss anything?

Additionally, patch v2 fixes the warning detected by the
kernel test robot.

>> drivers/nvme/host/rdma.c:2067:1: warning: label 'unmap_qe' defined but not used [-Wunused-label]
    2067 | unmap_qe:
         | ^~~~~~~~


From 53db77a37bbcdc8f8bc1a61452a0586d195ce8da Mon Sep 17 00:00:00 2001
From: Minsik Jeon <hmi.jeon@samsung.com>
Date: Fri, 20 Mar 2026 14:48:07 +0900
Subject: [PATCH v2] nvme: Move nvme_setup_cmd before hot_pathing

We were checking host_path_error before calling nvme_setup_cmd().
This caused the command setup to be skipped entirely when a pathing
error occurred, making it impossible to trace the nvme command via
trace_cmd nvme_complete_rq().

As a result, when nvme_complete_rq() logged a completion with cmdid=0,
it was impossible to correlate the completion with the nvme command
request.

This patch reorders the logic to first call nvme_setup_cmd(), then
perform the host_pathing_error check.

Co-authored-by: Beomsoo Kim <beomsooo.kim@samsung.com>
Co-authored-by: Eunsoo Lee <euns212.lee@samsung.com>
Co-authored-by: Steven Seungcheol Lee <sc108.lee@samsung.com>
Signed-off-by: Minsik Jeon <hmi.jeon@samsung.com>
---
 drivers/nvme/host/apple.c  |  6 +++---
 drivers/nvme/host/fc.c     |  8 ++++----
 drivers/nvme/host/pci.c    |  8 ++++----
 drivers/nvme/host/rdma.c   | 10 +++++-----
 drivers/nvme/host/tcp.c    |  8 ++++----
 drivers/nvme/target/loop.c |  6 +++---
 6 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index ed61b97fde59..2a28c992d024 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -783,13 +783,13 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!READ_ONCE(q->enabled)))
 		return BLK_STS_IOERR;
 
-	if (!nvme_check_ready(&anv->ctrl, req, true))
-		return nvme_fail_nonready_command(&anv->ctrl, req);
-
 	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
 
+	if (!nvme_check_ready(&anv->ctrl, req, true))
+		return nvme_fail_nonready_command(&anv->ctrl, req);
+
 	if (blk_rq_nr_phys_segments(req)) {
 		ret = apple_nvme_map_data(anv, req, cmnd);
 		if (ret)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e1bb4707183c..8ea37102a836 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2762,14 +2762,14 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	u32 data_len;
 	blk_status_t ret;
 
-	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
-	    !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
-
 	ret = nvme_setup_cmd(ns, rq);
 	if (ret)
 		return ret;
 
+	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+	    !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
 	/*
 	 * nvme core doesn't quite treat the rq opaquely. Commands such
 	 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b78ba239c8ea..ad0363f7e681 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1376,10 +1376,6 @@ static blk_status_t nvme_prep_rq(struct request *req)
 	iod->meta_total_len = 0;
 	iod->nr_dma_vecs = 0;
 
-	ret = nvme_setup_cmd(req->q->queuedata, req);
-	if (ret)
-		return ret;
-
 	if (blk_rq_nr_phys_segments(req)) {
 		ret = nvme_map_data(req);
 		if (ret)
@@ -1418,6 +1414,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
 		return BLK_STS_IOERR;
 
+	ret = nvme_setup_cmd(req->q->queuedata, req);
+	if (ret)
+		return ret;
+
 	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
 		return nvme_fail_nonready_command(&dev->ctrl, req);
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 57111139e84f..36f1a5a41ed9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2005,6 +2005,10 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	WARN_ON_ONCE(rq->tag < 0);
 
+	ret = nvme_setup_cmd(ns, rq);
+	if (ret)
+		return ret;
+
 	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
@@ -2020,10 +2024,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
-	ret = nvme_setup_cmd(ns, rq);
-	if (ret)
-		goto unmap_qe;
-
 	nvme_start_request(rq);
 
 	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
@@ -2064,7 +2064,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	else
 		ret = BLK_STS_IOERR;
 	nvme_cleanup_cmd(rq);
-unmap_qe:
+
 	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
 			    DMA_TO_DEVICE);
 	return ret;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 243dab830dc8..1a3640e81b8f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2705,10 +2705,6 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
 	blk_status_t ret;
 
-	ret = nvme_setup_cmd(ns, rq);
-	if (ret)
-		return ret;
-
 	req->state = NVME_TCP_SEND_CMD_PDU;
 	req->status = cpu_to_le16(NVME_SC_SUCCESS);
 	req->offset = 0;
@@ -2768,6 +2764,10 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
+	ret = nvme_setup_cmd(ns, rq);
+	if (ret)
+		return ret;
+
 	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4b3f4f11928d..475b532d08e8 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -140,13 +140,13 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
 	blk_status_t ret;
 
-	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
-
 	ret = nvme_setup_cmd(ns, req);
 	if (ret)
 		return ret;
 
+	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
+		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
+
 	nvme_start_request(req);
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 	iod->req.port = queue->ctrl->port;
-- 
2.52.0



^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH] nvme: Move nvme_setup_cmd before hot_pathing
  2026-03-20  5:21 ` [PATCH] nvme: Move nvme_setup_cmd before hot_pathing 전민식
                     ` (3 preceding siblings ...)
  2026-03-22 12:09   ` kernel test robot
@ 2026-03-24 23:55   ` Justin Tee
  2026-03-25  6:33     ` [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error 전민식
  4 siblings, 1 reply; 19+ messages in thread
From: Justin Tee @ 2026-03-24 23:55 UTC (permalink / raw)
  To: hmi.jeon, kbusch@kernel.org, axboe@kernel.dk
  Cc: sven@kernel.org, j@jannau.net, neal@gompa.dev, hch@lst.de,
	sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org

Hi Minsik,

 > diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
 > index e1bb4707183c..8ea37102a836 100644
 > --- a/drivers/nvme/host/fc.c
 > +++ b/drivers/nvme/host/fc.c
 > @@ -2762,14 +2762,14 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 >         u32 data_len;
 >         blk_status_t ret;
 >
 > -       if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 > -           !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 > -               return nvme_fail_nonready_command(&queue->ctrl->ctrl, 
rq);
 > -
 >         ret = nvme_setup_cmd(ns, rq);
 >         if (ret)
 >                 return ret;
 >
 > +       if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 > +           !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 > +               return nvme_fail_nonready_command(&queue->ctrl->ctrl, 
rq);
 > +

__nvme_check_ready() checks for (nvme_req(req)->flags & 
NVME_REQ_USERCMD).  In nvme_setup_cmd(), nvme_clear_nvme_request() 
clears the nvme_req(req)->flags when RQF_DONTPREP is not set.

Is it possible that this patch would have nvme_setup_cmd() erase a 
nvme_req(req)’s NVME_REQ_USERCMD flag before __nvme_check_ready() is 
called for each respective .queue_rq?

Regards,
Justin Tee


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error
  2026-03-24 23:55   ` Justin Tee
@ 2026-03-25  6:33     ` 전민식
  2026-03-25 18:37       ` Justin Tee
  0 siblings, 1 reply; 19+ messages in thread
From: 전민식 @ 2026-03-25  6:33 UTC (permalink / raw)
  To: Justin Tee, kbusch@kernel.org, axboe@kernel.dk
  Cc: sven@kernel.org, j@jannau.net, neal@gompa.dev, hch@lst.de,
	sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org

Hi Justin,


After your feedback, I found a simpler approach.
I've modified the patch to add nvme_setup_cmd() inside 
host_path_error(). I think this patch won't cause side effects.

Please leave a review. Thank you.

- Before
nvme_complete_rq: \
nvme0: qid=0, cmdid=0, res=0x0, retries=0, flags=0x2, status=0x370

- After
nvme_setup_cmd: \
nvme0: qid=0, cmdid=32777, nsid=0, flags=0x0, meta=0x0, \
cmd=(nvme_admin_identify cns=1, ctrlid=0)
nvme_complete_rq: \
nvme0: qid=0, cmdid=32777, res=0x0, retries=0, flags=0x2, status=0x370

Regards,
Minsik Jeon



From 98af885be8314dc9093983c531fc36b454b78e81 Mon Sep 17 00:00:00 2001
From: Minsik Jeon <hmi.jeon@samsung.com>
Date: Wed, 25 Mar 2026 15:58:20 +0900
Subject: [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error

We were checking host_path_error before calling nvme_setup_cmd().
This caused the command setup to be skipped entirely when a pathing
error occurred, making it impossible to trace the nvme command via
trace_cmd nvme_complete_rq().

As a result, when nvme_complete_rq() logged a completion with cmdid=0,
it was impossible to correlate the completion with the nvme command
request.

This patch adds nvme_setup_cmd() to nvme_host_path_error().

- Before
nvme_complete_rq: \
nvme0: qid=0, cmdid=0, res=0x0, retries=0, flags=0x2, status=0x370

- After
nvme_setup_cmd: \
nvme0: qid=0, cmdid=32777, nsid=0, flags=0x0, meta=0x0, \
cmd=(nvme_admin_identify cns=1, ctrlid=0)
nvme_complete_rq: \
nvme0: qid=0, cmdid=32777, res=0x0, retries=0, flags=0x2, status=0x370

Co-authored-by: Beomsoo Kim <beomsooo.kim@samsung.com>
Co-authored-by: Eunsoo Lee <euns212.lee@samsung.com>
Co-authored-by: Steven Seungcheol Lee <sc108.lee@samsung.com>
Signed-off-by: Minsik Jeon <hmi.jeon@samsung.com>
---
 drivers/nvme/host/core.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 766e9cc4ffca..378d28b2c971 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -512,6 +512,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
 blk_status_t nvme_host_path_error(struct request *req)
 {
 	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+	nvme_setup_cmd(req->q->queuedata, req);
 	blk_mq_set_request_complete(req);
 	nvme_complete_rq(req);
 	return BLK_STS_OK;
-- 
2.52.0



^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error
  2026-03-25  6:33     ` [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error 전민식
@ 2026-03-25 18:37       ` Justin Tee
  2026-03-25 19:03         ` Keith Busch
  0 siblings, 1 reply; 19+ messages in thread
From: Justin Tee @ 2026-03-25 18:37 UTC (permalink / raw)
  To: hmi.jeon
  Cc: kbusch@kernel.org, axboe@kernel.dk, sven@kernel.org, j@jannau.net,
	neal@gompa.dev, hch@lst.de, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org

Hi Minsik,

> - After
> nvme_setup_cmd: \
> nvme0: qid=0, cmdid=32777, nsid=0, flags=0x0, meta=0x0, \
> cmd=(nvme_admin_identify cns=1, ctrlid=0)
> nvme_complete_rq: \
> nvme0: qid=0, cmdid=32777, res=0x0, retries=0, flags=0x2, status=0x370

> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 766e9cc4ffca..378d28b2c971 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -512,6 +512,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
> blk_status_t nvme_host_path_error(struct request *req)
> {
>         nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
> +       nvme_setup_cmd(req->q->queuedata, req);
>         blk_mq_set_request_complete(req);
>         nvme_complete_rq(req);
>         return BLK_STS_OK;

Since trace_nvme_complete_rq is printing only cmdid to help identify
the command, why not define a new TRACE_EVENT(nvme_host_path_error,
...) in trace.h instead?

So, perhaps something like this?

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5037444687bd..a2e253dbb744 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -511,7 +511,11 @@ EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
  */
 blk_status_t nvme_host_path_error(struct request *req)
 {
+       struct nvme_command *cmd = nvme_req(req)->cmd;
+
        nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+       cmd->common.command_id = nvme_cid(req);
+       trace_nvme_host_path_error(req, cmd);
        blk_mq_set_request_complete(req);
        nvme_complete_rq(req);
        return BLK_STS_OK;

Regards,
Justin


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error
  2026-03-25 18:37       ` Justin Tee
@ 2026-03-25 19:03         ` Keith Busch
  2026-03-26  1:44           ` [PATCH v4] nvme: Skip trace complete_rq on host path error 전민식
  0 siblings, 1 reply; 19+ messages in thread
From: Keith Busch @ 2026-03-25 19:03 UTC (permalink / raw)
  To: Justin Tee
  Cc: hmi.jeon, axboe@kernel.dk, sven@kernel.org, j@jannau.net,
	neal@gompa.dev, hch@lst.de, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org

On Wed, Mar 25, 2026 at 11:37:44AM -0700, Justin Tee wrote:
> > - After
> > nvme_setup_cmd: \
> > nvme0: qid=0, cmdid=32777, nsid=0, flags=0x0, meta=0x0, \
> > cmd=(nvme_admin_identify cns=1, ctrlid=0)
> > nvme_complete_rq: \
> > nvme0: qid=0, cmdid=32777, res=0x0, retries=0, flags=0x2, status=0x370
> 
> > diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> > index 766e9cc4ffca..378d28b2c971 100644
> > --- a/drivers/nvme/host/core.c
> > +++ b/drivers/nvme/host/core.c
> > @@ -512,6 +512,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
> > blk_status_t nvme_host_path_error(struct request *req)
> > {
> >         nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
> > +       nvme_setup_cmd(req->q->queuedata, req);
> >         blk_mq_set_request_complete(req);
> >         nvme_complete_rq(req);
> >         return BLK_STS_OK;
> 
> Since trace_nvme_complete_rq is printing only cmdid to help identify
> the command, why not define a new TRACE_EVENT(nvme_host_path_error,
> ...) in trace.h instead?

Why are we even tracing the completion? I agree the completion without a
submission is confusing, but why don't we just skip tracing the
completion in this condition? The idea for these trace events was to
match up commands dispatched to hardware with the hardware's
posted response, so I'm not sure what value we get by synthesizing both
sides of the events.


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v4] nvme: Skip trace complete_rq on host path error
  2026-03-25 19:03         ` Keith Busch
@ 2026-03-26  1:44           ` 전민식
  2026-03-26  6:14             ` hch
  0 siblings, 1 reply; 19+ messages in thread
From: 전민식 @ 2026-03-26  1:44 UTC (permalink / raw)
  To: Keith Busch, Justin Tee
  Cc: axboe@kernel.dk, sven@kernel.org, j@jannau.net, neal@gompa.dev,
	hch@lst.de, sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	James Smart, kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	전민식, 칸찬

Hi Keith Busch,

If it's host pathing error, Do you mean to skip `trace_nvme_complete_rq()`?
If so, I agree with this approach.

Thank you

Regards,
Minsik Jeon


From c1f3c209c35233154bf5918092bdbb085828db23 Mon Sep 17 00:00:00 2001
From: Minsik Jeon <hmi.jeon@samsung.com>
Date: Thu, 26 Mar 2026 11:09:57 +0900
Subject: [PATCH v4] nvme: Skip trace complete_rq on host path error

We were checking host_path_error before calling nvme_setup_cmd().
This caused the command setup to be skipped entirely when a pathing
error occurred, making it impossible to trace the nvme command via
trace_cmd nvme_complete_rq().

As a result, when nvme_complete_rq() logged a completion with cmdid=0,
it was impossible to correlate the completion with the nvme command
request.

This patch skips trace_nvme_complete_rq() on NVMe host path error.

Co-authored-by: Beomsoo Kim <beomsooo.kim@samsung.com>
Co-authored-by: Eunsoo Lee <euns212.lee@samsung.com>
Co-authored-by: Steven Seungcheol Lee <sc108.lee@samsung.com>
Signed-off-by: Minsik Jeon <hmi.jeon@samsung.com>
---
 drivers/nvme/host/core.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 766e9cc4ffca..8f98d5220206 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -458,7 +458,9 @@ void nvme_complete_rq(struct request *req)
 {
 	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 
-	trace_nvme_complete_rq(req);
+	if (nvme_req(req)->status != NVME_SC_HOST_PATH_ERROR)
+		trace_nvme_complete_rq(req);
+
 	nvme_cleanup_cmd(req);
 
 	/*
-- 
2.52.0



^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v4] nvme: Skip trace complete_rq on host path error
  2026-03-26  1:44           ` [PATCH v4] nvme: Skip trace complete_rq on host path error 전민식
@ 2026-03-26  6:14             ` hch
  2026-03-26  6:51               ` [PATCH v5] " 전민식
  0 siblings, 1 reply; 19+ messages in thread
From: hch @ 2026-03-26  6:14 UTC (permalink / raw)
  To: 전민식
  Cc: Keith Busch, Justin Tee, axboe@kernel.dk, sven@kernel.org,
	j@jannau.net, neal@gompa.dev, hch@lst.de, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬

> -	trace_nvme_complete_rq(req);
> +	if (nvme_req(req)->status != NVME_SC_HOST_PATH_ERROR)
> +		trace_nvme_complete_rq(req);
> +

Please add a comment here explaining why we need the check.



^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26  6:14             ` hch
@ 2026-03-26  6:51               ` 전민식
  2026-03-26 14:20                 ` hch
  2026-03-26 14:28                 ` Keith Busch
  0 siblings, 2 replies; 19+ messages in thread
From: 전민식 @ 2026-03-26  6:51 UTC (permalink / raw)
  To: hch@lst.de
  Cc: Keith Busch, Justin Tee, axboe@kernel.dk, sven@kernel.org,
	j@jannau.net, neal@gompa.dev, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬, 전민식

Hi hch,

I added a comment explaining why the trace is skipped on a host path error.


Thanks

Regards,
Minsik Jeon


From 3ee001c00cb4c7843d7bbb0c11fcc1fafeded9b4 Mon Sep 17 00:00:00 2001
From: Minsik Jeon <hmi.jeon@samsung.com>
Date: Thu, 26 Mar 2026 11:09:57 +0900
Subject: [PATCH v5] nvme: Skip trace complete_rq on host path error

We were checking host_path_error before calling nvme_setup_cmd().
This caused the command setup to be skipped entirely when a pathing
error occurred, making it impossible to trace the nvme command via
trace_cmd nvme_complete_rq().

As a result, when nvme_complete_rq() logged a completion with cmdid=0,
it was impossible to correlate the completion with the nvme command
request.

This patch skips trace_nvme_complete_rq() on NVMe host path error.

Co-authored-by: Beomsoo Kim <beomsooo.kim@samsung.com>
Co-authored-by: Eunsoo Lee <euns212.lee@samsung.com>
Co-authored-by: Steven Seungcheol Lee <sc108.lee@samsung.com>
Signed-off-by: Minsik Jeon <hmi.jeon@samsung.com>
---
 drivers/nvme/host/core.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 766e9cc4ffca..df5a47b8560d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -458,7 +458,14 @@ void nvme_complete_rq(struct request *req)
 {
 	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 
-	trace_nvme_complete_rq(req);
+	/*
+	 * The idea for these trace events was to match up commands
+	 * dispatched to hardware with the hardware's posted response.
+	 * So skip tracing for undispatched commands.
+	 */
+	if (nvme_req(req)->status != NVME_SC_HOST_PATH_ERROR)
+		trace_nvme_complete_rq(req);
+
 	nvme_cleanup_cmd(req);
 
 	/*
-- 
2.52.0



^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26  6:51               ` [PATCH v5] " 전민식
@ 2026-03-26 14:20                 ` hch
  2026-03-26 14:28                 ` Keith Busch
  1 sibling, 0 replies; 19+ messages in thread
From: hch @ 2026-03-26 14:20 UTC (permalink / raw)
  To: 전민식
  Cc: hch@lst.de, Keith Busch, Justin Tee, axboe@kernel.dk,
	sven@kernel.org, j@jannau.net, neal@gompa.dev, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬

On Thu, Mar 26, 2026 at 03:51:52PM +0900, 전민식 wrote:
> Hi hch,
> 
> I added a comment about why I do trace skip if it's host path error.

The patch looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

But such notes should go below the '---' so that they don't end up in the git history.



^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26  6:51               ` [PATCH v5] " 전민식
  2026-03-26 14:20                 ` hch
@ 2026-03-26 14:28                 ` Keith Busch
  2026-03-26 14:31                   ` hch
  1 sibling, 1 reply; 19+ messages in thread
From: Keith Busch @ 2026-03-26 14:28 UTC (permalink / raw)
  To: 전민식
  Cc: hch@lst.de, Justin Tee, axboe@kernel.dk, sven@kernel.org,
	j@jannau.net, neal@gompa.dev, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬

On Thu, Mar 26, 2026 at 03:51:52PM +0900, 전민식 wrote:
>  {
>  	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
>  
> -	trace_nvme_complete_rq(req);
> +	/*
> +	 * The idea for these trace events was to match up commands
> +	 * dispatched to hardware with the hardware's posted response.
> +	 * So skip tracing for undispatched commands.
> +	 */
> +	if (nvme_req(req)->status != NVME_SC_HOST_PATH_ERROR)
> +		trace_nvme_complete_rq(req);
> +

Well, how do we know a controller doesn't actually return that status
code? I was just suggesting to skip the trace for the condition we never
dispatched the command. An added bonus is we don't need a mostly
unnecessary 'if' check on every IO.

---
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f5ebcaa2f859c..0dcccdca2965e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -454,11 +454,10 @@ void nvme_end_req(struct request *req)
 	blk_mq_end_request(req, status);
 }
 
-void nvme_complete_rq(struct request *req)
+static void __nvme_complete_rq(struct request *req)
 {
 	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 
-	trace_nvme_complete_rq(req);
 	nvme_cleanup_cmd(req);
 
 	/*
@@ -493,6 +492,12 @@ void nvme_complete_rq(struct request *req)
 		return;
 	}
 }
+
+void nvme_complete_rq(struct request *req)
+{
+	trace_nvme_complete_rq(req);
+	__nvme_complete_rq(req);
+}
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_complete_batch_req(struct request *req)
@@ -513,7 +518,7 @@ blk_status_t nvme_host_path_error(struct request *req)
 {
 	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
 	blk_mq_set_request_complete(req);
-	nvme_complete_rq(req);
+	__nvme_complete_rq(req);
 	return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(nvme_host_path_error);
--


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26 14:28                 ` Keith Busch
@ 2026-03-26 14:31                   ` hch
  2026-03-26 14:43                     ` Keith Busch
  0 siblings, 1 reply; 19+ messages in thread
From: hch @ 2026-03-26 14:31 UTC (permalink / raw)
  To: Keith Busch
  Cc: 전민식, hch@lst.de, Justin Tee, axboe@kernel.dk,
	sven@kernel.org, j@jannau.net, neal@gompa.dev, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬

On Thu, Mar 26, 2026 at 08:28:46AM -0600, Keith Busch wrote:
> On Thu, Mar 26, 2026 at 03:51:52PM +0900, 전민식 wrote:
> >  {
> >  	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
> >  
> > -	trace_nvme_complete_rq(req);
> > +	/*
> > +	 * The idea for these trace events was to match up commands
> > +	 * dispatched to hardware with the hardware's posted response.
> > +	 * So skip tracing for undispatched commands.
> > +	 */
> > +	if (nvme_req(req)->status != NVME_SC_HOST_PATH_ERROR)
> > +		trace_nvme_complete_rq(req);
> > +
> 
> Well, how do we know a controller doesnn't actually return that status
> code? I was just suggesting to skip the trace for the condition we never
> dispatched the command. An added bonus is we don't need a mostly
> unnecessary 'if' check on every IO.

The 7?h error values were added for host use.  The description of
the section in the spec suggests this, but isn't actually as clear
as I would like it.  I wonder if we need to verify that controller
don't incorrectly return it, as that could cause some problems?

Independent of that your patch below looks sane to me.



^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26 14:31                   ` hch
@ 2026-03-26 14:43                     ` Keith Busch
  2026-03-27  6:19                       ` 전민식
  2026-03-27  6:37                       ` 전민식
  0 siblings, 2 replies; 19+ messages in thread
From: Keith Busch @ 2026-03-26 14:43 UTC (permalink / raw)
  To: hch@lst.de
  Cc: 전민식, Justin Tee, axboe@kernel.dk,
	sven@kernel.org, j@jannau.net, neal@gompa.dev, sagi@grimberg.me,
	justin.tee@broadcom.com, nareshgottumukkala83@gmail.com,
	paul.ely@broadcom.com, James Smart, kch@nvidia.com,
	linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬

On Thu, Mar 26, 2026 at 03:31:24PM +0100, hch@lst.de wrote:
> On Thu, Mar 26, 2026 at 08:28:46AM -0600, Keith Busch wrote:
> > On Thu, Mar 26, 2026 at 03:51:52PM +0900, 전민식 wrote:
> > >  {
> > >  	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
> > >  
> > > -	trace_nvme_complete_rq(req);
> > > +	/*
> > > +	 * The idea for these trace events was to match up commands
> > > +	 * dispatched to hardware with the hardware's posted response.
> > > +	 * So skip tracing for undispatched commands.
> > > +	 */
> > > +	if (nvme_req(req)->status != NVME_SC_HOST_PATH_ERROR)
> > > +		trace_nvme_complete_rq(req);
> > > +
> > 
> > Well, how do we know a controller doesnn't actually return that status
> > code? I was just suggesting to skip the trace for the condition we never
> > dispatched the command. An added bonus is we don't need a mostly
> > unnecessary 'if' check on every IO.
> 
> The 7?h error values were added for host use.  The description of
> the section in the spec suggests this, but isn't actually as clear
> as I would like it.  I wonder if we need to verify that controller
> don't incorrectly return it, as that could cause some problems?

Yeah, the spec is not very clear on their use. It just defines the codes
and a one sentence description, and then never mentioned again. I think
it allows the possibility for the controller to internally complete a
command with such status from some undefined OOB mechanism. I'm not sure
why the host needs to have their own self-generated status codes anyway
since it can already attach whatever arbitrary flags it wants to its
private data, like how we use NVME_REQ_CANCELLED.

Anyway if a controller did return it for whatever reason, even if it is
a bug, we'd want to see it in the trace.


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26 14:43                     ` Keith Busch
@ 2026-03-27  6:19                       ` 전민식
  2026-03-27  6:37                       ` 전민식
  1 sibling, 0 replies; 19+ messages in thread
From: 전민식 @ 2026-03-27  6:19 UTC (permalink / raw)
  To: Keith Busch, hch@lst.de
  Cc: Justin Tee, axboe@kernel.dk, sven@kernel.org, j@jannau.net,
	neal@gompa.dev, sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	James Smart, kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬, 전민식

On Thu, Mar 26, 2026 at 03:43PM +0100, hch@lst.de wrote:
> Yeah, the spec is not very clear on their use. It just defines the codes
> and a one sentence description, and then never mentioned again. I think
> it allows the possibility for the controller to internally complete a
> command with such status from some undefined OOB mechanism. I'm not sure
> why the host needs to have their own self-generated status codes anyway
> since it can already attach whatever arbitrary flags it wants to its
> private data, like how we use NVME_REQ_CANCELLED.
> 
> Anyway if a controller did return it for whatever reason, even if it is
> a bug, we'd want to see it in the trace.


I've confirmed what you discussed, and it seems like the approach you
proposed is a better one.
Can I send patch v7 as above?

Regards,
Minsik Jeon


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] nvme: Skip trace complete_rq on host path error
  2026-03-26 14:43                     ` Keith Busch
  2026-03-27  6:19                       ` 전민식
@ 2026-03-27  6:37                       ` 전민식
  1 sibling, 0 replies; 19+ messages in thread
From: 전민식 @ 2026-03-27  6:37 UTC (permalink / raw)
  To: Keith Busch, hch@lst.de
  Cc: Justin Tee, axboe@kernel.dk, sven@kernel.org, j@jannau.net,
	neal@gompa.dev, sagi@grimberg.me, justin.tee@broadcom.com,
	nareshgottumukkala83@gmail.com, paul.ely@broadcom.com,
	James Smart, kch@nvidia.com, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org, asahi@lists.linux.dev,
	linux-kernel@vger.kernel.org, 이은수,
	칸찬, 전민식

On Thu, Mar 26, 2026 at 08:43 -0600, Keith Busch wrote:
> Yeah, the spec is not very clear on their use. It just defines the codes
> and a one sentence description, and then never mentioned again. I think
> it allows the possibility for the controller to internally complete a
> command with such status from some undefined OOB mechanism. I'm not sure
> why the host needs to have their own self-generated status codes anyway
> since it can already attach whatever arbitrary flags it wants to its
> private data, like how we use NVME_REQ_CANCELLED.
> 
> Anyway if a controller did return it for whatever reason, even if it is
> a bug, we'd want to see it in the trace.

Sorry, the date was wrong, so I am sending it again.
On Thu, Mar 26, 2026 at 08:43 -0600, hch@lst.de wrote:
-> On Thu, Mar 26, 2026 at 14:43PM UTC, Keith Busch wrote:

I've confirmed what you discussed, and it seems like the approach you
proposed is a better one.
Can I send patch v7 as above?


Regards,
Minsik Jeon


^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2026-03-27  6:37 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
     [not found] <CGME20260320052101epcms2p42ae135da60b36685e9b7fca6849b57a6@epcms2p4>
2026-03-20  5:21 ` [PATCH] nvme: Move nvme_setup_cmd before hot_pathing 전민식
2026-03-20 13:43   ` Kanchan Joshi
2026-03-23  1:04     ` [PATCH v2] " 전민식
2026-03-22  7:16   ` [PATCH] " kernel test robot
2026-03-22  8:22   ` kernel test robot
2026-03-22 12:09   ` kernel test robot
2026-03-24 23:55   ` Justin Tee
2026-03-25  6:33     ` [PATCH v3] nvme: Add nvme_setup_cmd to host_path_error 전민식
2026-03-25 18:37       ` Justin Tee
2026-03-25 19:03         ` Keith Busch
2026-03-26  1:44           ` [PATCH v4] nvme: Skip trace complete_rq on host path error 전민식
2026-03-26  6:14             ` hch
2026-03-26  6:51               ` [PATCH v5] " 전민식
2026-03-26 14:20                 ` hch
2026-03-26 14:28                 ` Keith Busch
2026-03-26 14:31                   ` hch
2026-03-26 14:43                     ` Keith Busch
2026-03-27  6:19                       ` 전민식
2026-03-27  6:37                       ` 전민식

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox