From: Keith Busch <kbusch@meta.com>
To: <hch@lst.de>, <sagi@grimberg.me>, <linux-nvme@lists.infradead.org>
Cc: Keith Busch <kbusch@kernel.org>
Subject: [PATCH-part-2 6/9] nvme-pci: common dma pool alloc helper
Date: Wed, 4 Sep 2024 11:38:14 -0700
Message-ID: <20240904183818.713941-7-kbusch@meta.com>
In-Reply-To: <20240904183818.713941-1-kbusch@meta.com>
From: Keith Busch <kbusch@kernel.org>
The more complicated sgl and prp setup paths do the same thing: select a
pool, then allocate and initialize the iod dma fields. Provide a common
helper for this.
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
drivers/nvme/host/pci.c | 69 ++++++++++++++++++++---------------------
1 file changed, 34 insertions(+), 35 deletions(-)
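
For context, here is a minimal sketch of the call pattern the helper
enables. The surrounding function is hypothetical and only illustrates the
flow; the helper, the iod fields, and the 256-byte small-pool threshold
(32 8-byte PRP entries, or 16 16-byte SGL descriptors) come from the patch
itself:

static blk_status_t example_prp_setup(struct nvme_dev *dev,
				      struct nvme_iod *iod, int nprps)
{
	struct dma_pool *pool;
	__le64 *prp_list;

	/*
	 * One call now picks the small or page pool based on how many
	 * 8-byte PRP entries are needed, allocates the first descriptor
	 * list from it, and records the result in iod->list[0] and
	 * iod->first_dma (setting iod->nr_allocations accordingly).
	 */
	pool = nvme_pci_pool_alloc(dev, nprps, 8, iod);
	if (!pool)
		return BLK_STS_RESOURCE;

	prp_list = iod->list[0].prp_list;
	/* ... fill prp_list and chain further lists as before ... */
	return BLK_STS_OK;
}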
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cdba1f8e0bba6..4980dde804a0e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -217,6 +217,7 @@ struct nvme_queue {
};
union nvme_descriptor {
+ void *list;
struct nvme_sgl_desc *sg_list;
__le64 *prp_list;
};
@@ -586,6 +587,29 @@ static void nvme_print_sgl(struct scatterlist *sgl, int nents)
}
}
+static struct dma_pool *nvme_pci_pool_alloc(struct nvme_dev *dev,
+ unsigned nents, size_t desc_size,
+ struct nvme_iod *iod)
+{
+ struct dma_pool *pool;
+
+ if (nents <= (256 / desc_size)) {
+ pool = dev->prp_small_pool;
+ iod->nr_allocations = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->nr_allocations = 1;
+ }
+
+ iod->list[0].list = dma_pool_alloc(pool, GFP_ATOMIC, &iod->first_dma);
+ if (!iod->list[0].list) {
+ iod->nr_allocations = -1;
+ return NULL;
+ }
+
+ return pool;
+}
+
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
struct request *req)
{
@@ -599,7 +623,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
dma_addr_t prp_dma;
- int nprps, i;
+ int nprps, i = 0;
length -= (NVME_CTRL_PAGE_SIZE - offset);
if (length <= 0) {
@@ -622,22 +646,11 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
}
nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
- if (nprps <= (256 / 8)) {
- pool = dev->prp_small_pool;
- iod->nr_allocations = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->nr_allocations = 1;
- }
-
- prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
- if (!prp_list) {
- iod->nr_allocations = -1;
+ pool = nvme_pci_pool_alloc(dev, nprps, 8, iod);
+ if (!pool)
return BLK_STS_RESOURCE;
- }
- iod->list[0].prp_list = prp_list;
- iod->first_dma = prp_dma;
- i = 0;
+
+ prp_list = iod->list[0].prp_list;
for (;;) {
if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
@@ -697,12 +710,10 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_rw_command *cmd = &iod->cmd.rw;
- struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
+ struct nvme_rw_command *cmd = &iod->cmd.rw;
struct scatterlist *sg = iod->sgt.sgl;
unsigned int entries = iod->sgt.nents;
- dma_addr_t sgl_dma;
int i = 0;
/* setting the transfer type as SGL */
@@ -713,24 +724,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}
- if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
- pool = dev->prp_small_pool;
- iod->nr_allocations = 0;
- } else {
- pool = dev->prp_page_pool;
- iod->nr_allocations = 1;
- }
-
- sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
- if (!sg_list) {
- iod->nr_allocations = -1;
+ if (!nvme_pci_pool_alloc(dev, entries, sizeof(struct nvme_sgl_desc),
+ iod))
return BLK_STS_RESOURCE;
- }
-
- iod->list[0].sg_list = sg_list;
- iod->first_dma = sgl_dma;
- nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
+ sg_list = iod->list[0].sg_list;
+ nvme_pci_sgl_set_seg(&cmd->dptr.sgl, iod->first_dma, entries);
do {
nvme_pci_sgl_set_data(&sg_list[i++], sg);
sg = sg_next(sg);
--
2.43.5