From: Keith Busch <kbusch@kernel.org>
To: Christoph Hellwig <hch@lst.de>
Cc: Robin Murphy <robin.murphy@arm.com>,
Pradeep P V K <pradeep.pragallapati@oss.qualcomm.com>,
axboe@kernel.dk, sagi@grimberg.me,
linux-nvme@lists.infradead.org, linux-kernel@vger.kernel.org,
nitin.rawat@oss.qualcomm.com, Leon Romanovsky <leon@kernel.org>,
Marek Szyprowski <m.szyprowski@samsung.com>,
iommu@lists.linux.dev
Subject: Re: [PATCH V1] nvme-pci: Fix NULL pointer dereference in nvme_pci_prp_iter_next
Date: Mon, 2 Feb 2026 11:59:04 -0700 [thread overview]
Message-ID: <aYDz-BeIHMNUMiaF@kbusch-mbp> (raw)
In-Reply-To: <20260202173624.GA32713@lst.de>
On Mon, Feb 02, 2026 at 06:36:24PM +0100, Christoph Hellwig wrote:
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 2a52cf46d960..f944b747e1bd 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -816,6 +816,22 @@ static void nvme_unmap_data(struct request *req)
> nvme_free_descriptors(req);
> }
>
> +static bool nvme_pci_alloc_dma_vecs(struct request *req,
> + struct blk_dma_iter *iter)
> +{
> + struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> + struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
> +
> + iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
> + GFP_ATOMIC);
> + if (!iod->dma_vecs)
> + return false;
> + iod->dma_vecs[0].addr = iter->addr;
> + iod->dma_vecs[0].len = iter->len;
> + iod->nr_dma_vecs = 1;
> + return true;
> +}
> +
> static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
> struct blk_dma_iter *iter)
> {
> @@ -826,6 +842,8 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
> if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
> return false;
> if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
> + if (!iod->nr_dma_vecs && !nvme_pci_alloc_dma_vecs(req, iter))
> + return false;
In the case where this iteration caused dma_need_unmap() to toggle to
true, this is the iteration that allocates the dma_vecs, and it
initializes the first entry to this iter. But the next lines proceed to
save this iter in the next index, so it's doubly accounted for and
will get unmapped twice in the completion.
Also, if the allocation fails, we should set iter->status to
BLK_STS_RESOURCE so the callers know why the iteration can't continue.
Otherwise, the caller will think the request is badly formed if you
return false from here without setting iter->status.
Here's my quick take. Boot tested with swiotlb enabled, but haven't
tried to test the changing dma_need_unmap() scenario.
---
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9fc4a60280a07..233378faab9bd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -816,6 +816,28 @@ static void nvme_unmap_data(struct request *req)
nvme_free_descriptors(req);
}
+static bool nvme_pci_prp_save_mapping(struct blk_dma_iter *iter,
+ struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (!iod->dma_vecs) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+ GFP_ATOMIC);
+ if (!iod->dma_vecs) {
+ iter->status = BLK_STS_RESOURCE;
+ return false;
+ }
+ }
+
+ iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+ iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+ iod->nr_dma_vecs++;
+ return true;
+}
+
static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
struct blk_dma_iter *iter)
{
@@ -825,11 +847,8 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
return true;
if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
return false;
- if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
- iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
- iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
- iod->nr_dma_vecs++;
- }
+ if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev))
+ return nvme_pci_prp_save_mapping(iter, req);
return true;
}
@@ -843,15 +862,9 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
unsigned int prp_len, i;
__le64 *prp_list;
- if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
- iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
- GFP_ATOMIC);
- if (!iod->dma_vecs)
- return BLK_STS_RESOURCE;
- iod->dma_vecs[0].addr = iter->addr;
- iod->dma_vecs[0].len = iter->len;
- iod->nr_dma_vecs = 1;
- }
+ if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev))
+ if (!nvme_pci_prp_save_mapping(iter, req))
+ return iter->status;
/*
* PRP1 always points to the start of the DMA transfers.
@@ -1218,6 +1231,8 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->nr_descriptors = 0;
iod->total_len = 0;
iod->meta_total_len = 0;
+ iod->nr_dma_vecs = 0;
+ iod->dma_vecs = NULL;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
--
next prev parent reply other threads:[~2026-02-02 18:59 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-02 12:57 [PATCH V1] nvme-pci: Fix NULL pointer dereference in nvme_pci_prp_iter_next Pradeep P V K
2026-02-02 14:35 ` Christoph Hellwig
2026-02-02 15:16 ` Robin Murphy
2026-02-02 15:58 ` Leon Romanovsky
2026-02-02 17:13 ` Keith Busch
2026-02-02 17:36 ` Christoph Hellwig
2026-02-02 18:59 ` Keith Busch [this message]
2026-02-03 5:27 ` Christoph Hellwig
2026-02-03 6:14 ` Keith Busch
2026-02-03 6:23 ` Christoph Hellwig
2026-02-03 14:05 ` Pradeep Pragallapati
2026-02-04 14:04 ` Pradeep Pragallapati
2026-02-04 14:27 ` Keith Busch
2026-02-03 9:42 ` Leon Romanovsky
2026-02-03 13:50 ` Robin Murphy
2026-02-03 17:41 ` Keith Busch
2026-02-02 17:39 ` Robin Murphy
2026-02-02 15:22 ` Leon Romanovsky
2026-02-02 15:26 ` Robin Murphy
2026-02-02 17:18 ` Keith Busch
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aYDz-BeIHMNUMiaF@kbusch-mbp \
--to=kbusch@kernel.org \
--cc=axboe@kernel.dk \
--cc=hch@lst.de \
--cc=iommu@lists.linux.dev \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=m.szyprowski@samsung.com \
--cc=nitin.rawat@oss.qualcomm.com \
--cc=pradeep.pragallapati@oss.qualcomm.com \
--cc=robin.murphy@arm.com \
--cc=sagi@grimberg.me \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox