Linux-NVME Archive on lore.kernel.org
* fix error unwinding in nvme_map_data
From: Christoph Hellwig @ 2021-01-20  9:49 UTC
  To: linux-nvme; +Cc: Marc Orr

Hi all,

This series fixes the error unwinding in nvme_map_data after a report
from Marc that we see unmaps of data that was never actually mapped in
AMD SEV environments (which happen to be fairly DMA-constrained).
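
For reference, here is a minimal sketch of the hazard; it is a
simplified illustration modeled on the pre-series shape of
nvme_map_data, not the exact source:

	static blk_status_t map_data_old_shape(struct nvme_dev *dev,
			struct request *req, struct nvme_command *cmnd)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
		blk_status_t ret = BLK_STS_RESOURCE;
		int nr_mapped;

		iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
		if (!iod->nents)
			goto out;
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
		if (!nr_mapped)
			goto out;	/* nothing is mapped at this point */
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	out:
		/*
		 * The catch-all unwind runs for every failure above, so
		 * nvme_unmap_data() can end up calling dma_unmap_sg() on a
		 * scatterlist that was never mapped, or unmapping twice
		 * after a partial setup failure.
		 */
		if (ret != BLK_STS_OK)
			nvme_unmap_data(dev, req);
		return ret;
	}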


* [PATCH 1/2] nvme-pci: refactor nvme_unmap_data
From: Christoph Hellwig @ 2021-01-20  9:49 UTC
  To: linux-nvme; +Cc: Marc Orr

Split out three helpers from nvme_unmap_data that will allow finer-grained
unwinding from nvme_map_data.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
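A sketch of the finer-grained unwinding the split enables; the actual
callers are wired up in patch 2/2, so treat this as an outline rather
than the final code:

	blk_status_t ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);

	if (ret != BLK_STS_OK) {
		/* the setup helper already freed its own PRP pages */
		nvme_unmap_sg(dev, req);	/* undo only the DMA mapping */
		mempool_free(iod->sg, dev->iod_mempool); /* and the sg list */
		return ret;
	}
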
 drivers/nvme/host/pci.c | 77 ++++++++++++++++++++++++++---------------
 1 file changed, 49 insertions(+), 28 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 25456d02eddb8c..e29ece9e4d4b8e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -543,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
 	int i;
 
-	if (iod->dma_len) {
-		dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-			       rq_dma_dir(req));
-		return;
+	for (i = 0; i < iod->npages; i++) {
+		__le64 *prp_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+		dma_addr = next_dma_addr;
 	}
 
-	WARN_ON_ONCE(!iod->nents);
+}
 
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-				    rq_dma_dir(req));
-	else
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+	const int last_sg = SGES_PER_PAGE - 1;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	dma_addr_t dma_addr = iod->first_dma;
+	int i;
 
+	for (i = 0; i < iod->npages; i++) {
+		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-	if (iod->npages == 0)
-		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-			dma_addr);
+		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+		dma_addr = next_dma_addr;
+	}
 
-	for (i = 0; i < iod->npages; i++) {
-		void *addr = nvme_pci_iod_list(req)[i];
+}
 
-		if (iod->use_sgl) {
-			struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-			next_dma_addr =
-			    le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-		} else {
-			__le64 *prp_list = addr;
+	if (is_pci_p2pdma_page(sg_page(iod->sg)))
+		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+				    rq_dma_dir(req));
+	else
+		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-		}
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-		dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-		dma_addr = next_dma_addr;
+	if (iod->dma_len) {
+		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+			       rq_dma_dir(req));
+		return;
 	}
 
+	WARN_ON_ONCE(!iod->nents);
+
+	nvme_unmap_sg(dev, req);
+	if (iod->npages == 0)
+		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+			      iod->first_dma);
+	else if (iod->use_sgl)
+		nvme_free_sgls(dev, req);
+	else
+		nvme_free_prps(dev, req);
 	mempool_free(iod->sg, dev->iod_mempool);
 }
 
-- 
2.29.2



* [PATCH 2/2] nvme-pci: fix error unwind in nvme_map_data
From: Christoph Hellwig @ 2021-01-20  9:49 UTC
  To: linux-nvme; +Cc: Marc Orr

Properly unwind step by step using refactored helpers from nvme_unmap_data
to avoid a potential double dma_unmap on a mapping failure.

Reported-by: Marc Orr <marcorr@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
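In outline, the error flow after this patch; this is an annotated
summary of the diff below, not additional code:

	/*
	 * nvme_map_data() unwind, step by step:
	 *
	 *   blk_rq_map_sg() finds no segments  -> out_free_sg
	 *   dma_map_sg_attrs() maps nothing    -> out_free_sg
	 *   setup_prps()/setup_sgls() fails    -> the helper frees its own
	 *     PRP/SGL pool pages (nvme_free_prps()/nvme_free_sgls()),
	 *     then the caller jumps to out_dma_unmap
	 *
	 *   out_dma_unmap: nvme_unmap_sg()     undoes the DMA mapping
	 *   out_free_sg:   mempool_free()      releases the scatterlist
	 *
	 * No failure path reaches dma_unmap_sg() for data that was never
	 * mapped.
	 */
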
 drivers/nvme/host/pci.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e29ece9e4d4b8e..4e93d1b52df202 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -683,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return BLK_STS_RESOURCE;
+				goto free_prps;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -703,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
-
 done:
 	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
 	return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+	nvme_free_prps(dev, req);
+	return BLK_STS_RESOURCE;
+bad_sgl:
 	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
 			"Invalid SGL for payload:%d nents:%d\n",
 			blk_rq_payload_bytes(req), iod->nents);
@@ -782,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
 			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
 			if (!sg_list)
-				return BLK_STS_RESOURCE;
+				goto free_sgls;
 
 			i = 0;
 			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -795,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	} while (--entries > 0);
 
 	return BLK_STS_OK;
+free_sgls:
+	nvme_free_sgls(dev, req);
+	return BLK_STS_RESOURCE;
 }
 
 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -863,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
 	if (!iod->nents)
-		goto out;
+		goto out_free_sg;
 
 	if (is_pci_p2pdma_page(sg_page(iod->sg)))
 		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -872,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
 					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
 	if (!nr_mapped)
-		goto out;
+		goto out_free_sg;
 
 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
 	if (iod->use_sgl)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
 	if (ret != BLK_STS_OK)
-		nvme_unmap_data(dev, req);
+		goto out_dma_unmap;
+	return BLK_STS_OK;
+
+out_dma_unmap:
+	nvme_unmap_sg(dev, req);
+out_free_sg:
+	mempool_free(iod->sg, dev->iod_mempool);
 	return ret;
 }
 
-- 
2.29.2



* Re: [PATCH 1/2] nvme-pci: refactor nvme_unmap_data
From: Marc Orr @ 2021-01-20 15:19 UTC
  To: Christoph Hellwig; +Cc: linux-nvme

On Wed, Jan 20, 2021 at 1:49 AM Christoph Hellwig <hch@lst.de> wrote:
>
> Split out three helpers from nvme_unmap_data that will allow finer-grained
> unwinding from nvme_map_data.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> [...]

Reviewed-by: Marc Orr <marcorr@google.com>


* Re: [PATCH 2/2] nvme-pci: fix error unwind in nvme_map_data
From: Marc Orr @ 2021-01-20 15:23 UTC
  To: Christoph Hellwig; +Cc: linux-nvme

On Wed, Jan 20, 2021 at 1:49 AM Christoph Hellwig <hch@lst.de> wrote:
>
> Properly unwind step by step using refactored helpers from nvme_unmap_data
> to avoid a potential double dma_unmap on a mapping failure.
>
> Reported-by: Marc Orr <marcorr@google.com>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Should these patches go to stable?

> [...]
> -out:
>         if (ret != BLK_STS_OK)
> -               nvme_unmap_data(dev, req);
> +               goto out_dma_unmap;
> +       return BLK_STS_OK;
> +
> +out_dma_unmap:

nit: naming this label out_sg_unmap or out_unmap_sg might be a little
clearer, since the line below doesn't have the word "dma" anywhere.

> +       nvme_unmap_sg(dev, req);
> +out_free_sg:
> +       mempool_free(iod->sg, dev->iod_mempool);
>         return ret;
>  }

Reviewed-by: Marc Orr <marcorr@google.com>


* Re: [PATCH 2/2] nvme-pci: fix error unwind in nvme_map_data
From: Christoph Hellwig @ 2021-01-20 16:48 UTC
  To: Marc Orr; +Cc: Christoph Hellwig, linux-nvme

On Wed, Jan 20, 2021 at 07:23:39AM -0800, Marc Orr wrote:
> On Wed, Jan 20, 2021 at 1:49 AM Christoph Hellwig <hch@lst.de> wrote:
> > [...]
> 
> Should these patches go to stable?

Yes.  I've added the proper Fixes tag:

Fixes: 7fe07d14f71f ("nvme-pci: merge nvme_free_iod into nvme_unmap_data")


* Re: [PATCH 2/2] nvme-pci: fix error unwind in nvme_map_data
From: Keith Busch @ 2021-01-20 17:42 UTC
  To: Christoph Hellwig; +Cc: Marc Orr, linux-nvme

On Wed, Jan 20, 2021 at 05:48:24PM +0100, Christoph Hellwig wrote:
> On Wed, Jan 20, 2021 at 07:23:39AM -0800, Marc Orr wrote:
> > [...]
> > Should these patches go to stable?
> 
> Yes.  I've added the proper Fixes tag:
> 
> Fixes: 7fe07d14f71f ("nvme-pci: merge nvme_free_iod into nvme_unmap_data")

Patches look good.

Reviewed-by: Keith Busch <kbusch@kernel.org>

Just FYI: 2/2 is the only one with the "Fixes" tag, but it depends on
1/2 to compile. It applies cleanly to recent stable trees on its own,
though, so the stable bot might pick it up without its dependency and
break the build.


* Re: [PATCH 2/2] nvme-pci: fix error unwind in nvme_map_data
From: Christoph Hellwig @ 2021-01-20 17:56 UTC
  To: Keith Busch; +Cc: Marc Orr, Christoph Hellwig, linux-nvme

On Thu, Jan 21, 2021 at 02:42:52AM +0900, Keith Busch wrote:
> [...]
> Just FYI: 2/2 is the only one with the "Fixes" tag, but it depends on
> 1/2 to compile. It applies cleanly to recent stable trees on its own,
> though, so the stable bot might pick it up without its dependency and
> break the build.

Patch 2 obviously depends on patch 1; the bot has been pretty good at
figuring that out.

