From: hch@infradead.org (Christoph Hellwig)
Subject: [RFC PATCH 1/2] nvme: pci: Move CMB allocation into a pool.
Date: Fri, 20 Jul 2018 07:49:26 -0700
Message-ID: <20180720144926.GB29886@infradead.org>
In-Reply-To: <20180719230628.31494-2-scott.bauer@intel.com>

On Thu, Jul 19, 2018 at 05:06:27PM -0600, Scott Bauer wrote:
> This switches the CMB allocation from straight offset calculations
> to allocating from a pool.

Please don't change this code now.  Logan has been working for a while
to get PCIe P2P support upstream, which will move this to a generic
pool:

https://github.com/sbates130272/linux-p2pmem/commits/pci-p2p-v4
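
For reference, after that series the nvme driver no longer manages the
CMB itself: the BAR is handed to the P2P DMA core, which wraps it in a
generic pool.  A rough sketch of what the allocation side then looks
like (function names taken from the in-flight series above, so treat
this as an illustration of the direction rather than a final API):

	#include <linux/pci-p2pdma.h>

	/* hand the CMB BAR region to the P2P DMA core, which
	 * maintains a per-device pool for it internally */
	if (pci_p2pdma_add_resource(pdev, bar, size, offset))
		return;

	/* per-queue SQ setup becomes a plain pool allocation */
	nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq->q_depth));
	if (nvmeq->sq_cmds)
		nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							    nvmeq->sq_cmds);

	/* teardown is symmetric */
	pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));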

> 
> Signed-off-by: Scott Bauer <scott.bauer@intel.com>
> ---
>  drivers/nvme/host/pci.c | 58 ++++++++++++++++++++++++++++++++++-------
>  1 file changed, 48 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 8dcae11bbf3a..b8c81be4a985 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -30,6 +30,7 @@
>  #include <linux/types.h>
>  #include <linux/io-64-nonatomic-lo-hi.h>
>  #include <linux/sed-opal.h>
> +#include <linux/genalloc.h>
>  
>  #include "nvme.h"
>  
> @@ -100,7 +101,7 @@ struct nvme_dev {
>  	struct mutex shutdown_lock;
>  	bool subsystem;
>  	void __iomem *cmb;
> -	pci_bus_addr_t cmb_bus_addr;
> +	struct gen_pool *cmb_pool;
>  	u64 cmb_size;
>  	u32 cmbsz;
>  	u32 cmbloc;
> @@ -1300,6 +1301,12 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
>  	if (nvmeq->sq_cmds)
>  		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
>  					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
> +	if (nvmeq->sq_cmds_io) {
> +		gen_pool_free(nvmeq->dev->cmb_pool, (unsigned long)nvmeq->sq_cmds_io,
> +			      roundup(SQ_SIZE(nvmeq->q_depth),
> +				      nvmeq->dev->ctrl.page_size));
> +		nvmeq->sq_cmds_io = NULL;
> +	}
>  }
>  
>  static void nvme_free_queues(struct nvme_dev *dev, int lowest)
> @@ -1467,14 +1474,19 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
>  static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
>  {
>  	struct nvme_dev *dev = nvmeq->dev;
> -	int result;
> +	int result = -ENOMEM;
>  	s16 vector;
> +	unsigned size;
>  
> +	size = roundup(SQ_SIZE(nvmeq->q_depth), dev->ctrl.page_size);
>  	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
> -		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
> -						      dev->ctrl.page_size);
> -		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
> -		nvmeq->sq_cmds_io = dev->cmb + offset;
> +		nvmeq->sq_cmds_io = (void *)gen_pool_alloc(dev->cmb_pool, size);
> +		if (!nvmeq->sq_cmds_io)
> +			return result;
> +
> +		nvmeq->sq_dma_addr =
> +			gen_pool_virt_to_phys(dev->cmb_pool,
> +					      (unsigned long)nvmeq->sq_cmds_io);
>  	}
>  
>  	/*
> @@ -1710,8 +1722,7 @@ static void nvme_map_cmb(struct nvme_dev *dev)
>  	u64 size, offset;
>  	resource_size_t bar_size;
>  	struct pci_dev *pdev = to_pci_dev(dev->dev);
> -	int bar;
> -
> +	int bar, ret;
>  	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
>  	if (!dev->cmbsz)
>  		return;
> @@ -1733,24 +1744,51 @@ static void nvme_map_cmb(struct nvme_dev *dev)
>  	 * for example, due to being behind a bridge. Reduce the CMB to
>  	 * the reported size of the BAR
>  	 */
> +
>  	if (size > bar_size - offset)
>  		size = bar_size - offset;
>  
> +	dev->cmb_pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
> +	if (!dev->cmb_pool)
> +		return;
> +
>  	dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
>  	if (!dev->cmb)
> -		return;
> -	dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
> +		goto unwind_pool;
> +
> +	ret = gen_pool_add_virt(dev->cmb_pool, (unsigned long) dev->cmb,
> +				pci_bus_address(pdev, bar) + offset,
> +				size, dev_to_node(&pdev->dev));
> +
> +	if (ret) {
> +		pr_err("%s: failed to add CMB to the gen pool\n", __func__);
> +		goto unwind_pool_cmb;
> +	}
> +
>  	dev->cmb_size = size;
>  
>  	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
>  				    &dev_attr_cmb.attr, NULL))
>  		dev_warn(dev->ctrl.device,
>  			 "failed to add sysfs attribute for CMB\n");
> +
> +	return;
> +
> + unwind_pool_cmb:
> +	iounmap(dev->cmb);
> +	dev->cmb = NULL;
> + unwind_pool:
> +	gen_pool_destroy(dev->cmb_pool);
> +	dev->cmb_pool = NULL;
>  }
>  
>  static inline void nvme_release_cmb(struct nvme_dev *dev)
>  {
>  	if (dev->cmb) {
> +		if (use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
> +			nvme_free_queues(dev, 1);
> +		gen_pool_destroy(dev->cmb_pool);
> +		dev->cmb_pool = NULL;
>  		iounmap(dev->cmb);
>  		dev->cmb = NULL;
>  		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
> -- 
> 2.17.1
---end quoted text---
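
As an aside for readers new to it, the genalloc API the patch builds
on has a simple lifecycle.  A minimal sketch using the same calls as
the hunks quoted above (variable names here are placeholders):

	#include <linux/genalloc.h>

	/* create a pool with PAGE_SHIFT minimum allocation order on
	 * the device's NUMA node */
	struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, nid);

	/* register a chunk: kernel virtual address, the bus/physical
	 * address it corresponds to, and its size */
	ret = gen_pool_add_virt(pool, (unsigned long)virt, bus_addr,
				size, nid);

	/* carve allocations out of the chunk; returns 0 on failure */
	unsigned long addr = gen_pool_alloc(pool, len);
	phys_addr_t phys = gen_pool_virt_to_phys(pool, addr);
	gen_pool_free(pool, addr, len);

	/* destroying a pool with outstanding allocations is a BUG() */
	gen_pool_destroy(pool);

That last point is why nvme_release_cmb() in the patch frees the I/O
queues before calling gen_pool_destroy().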

Thread overview: 10+ messages
2018-07-19 23:06 [RFC PATCH 0/2] Re-work CMB and add WDS support Scott Bauer
2018-07-19 23:06 ` [RFC PATCH 1/2] nvme: pci: Move CMB allocation into a pool Scott Bauer
2018-07-20 14:49   ` Christoph Hellwig [this message]
2018-07-19 23:06 ` [RFC PATCH 2/2] nvme-pci: Bounce data from Host memory to CMB Memory Scott Bauer
2018-07-20 14:23   ` Keith Busch
2018-07-20 14:49     ` Christoph Hellwig
2018-07-20 14:53       ` Scott Bauer
2018-07-20 14:46 ` [RFC PATCH 0/2] Re-work CMB and add WDS support Christoph Hellwig
2018-07-20 14:50   ` Scott Bauer
2018-07-20 16:01     ` Christoph Hellwig
