From: Amir Vadai <amirv@mellanox.com>
To: "David S. Miller" <davem@davemloft.net>
Cc: netdev@vger.kernel.org, Yevgeny Petrilin <yevgenyp@mellanox.com>,
Saeed Mahameed <saeedm@mellanox.com>,
Or Gerlitz <ogerlitz@mellanox.com>,
Achiad Shochat <achiad@mellanox.com>,
Ido Shamay <idos@mellanox.com>, Amir Vadai <amirv@mellanox.com>
Subject: [PATCH net-next 03/11] net/mlx5_core: Virtually extend work/completion queue buffers by one page
Date: Wed, 8 Apr 2015 17:51:17 +0300 [thread overview]
Message-ID: <1428504685-8945-4-git-send-email-amirv@mellanox.com> (raw)
In-Reply-To: <1428504685-8945-1-git-send-email-amirv@mellanox.com>
From: Achiad Shochat <achiad@mellanox.com>
- Direct buffer allocation is now done via mlx5_buf_alloc_direct().
- Add and export mlx5_buf_alloc_pages(), used by mlx5_buf_alloc() as before.
- Add a virtual_extension parameter that requests the buffer pages be
  virtually extended by one page for cyclic usage, so that callers do not
  have to handle a WQE wrapping around the end of the buffer (a usage
  sketch follows below).
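To illustrate the intended use (not part of this patch; wq_buf, wqe_offset,
wq_size and post_wqe_linear below are hypothetical names), a cyclic work
queue consumer can write a WQE that logically wraps past the end of the ring
with a single linear copy, assuming a WQE never exceeds one page:

/* Hypothetical sketch: the extension page maps the same physical page as
 * the start of the ring, so bytes written past wq_size land at offset 0,
 * which is exactly what a cyclic consumer expects.
 */
static void post_wqe_linear(struct mlx5_buf *wq_buf, u32 wqe_offset,
			    const void *wqe, size_t wqe_size)
{
	memcpy(wq_buf->direct.buf + wqe_offset, wqe, wqe_size);
}

On the allocation side such a buffer would be requested explicitly, e.g.
mlx5_buf_alloc_pages(dev, wq_size, 1, &wq_buf).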
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
---
drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 126 ++++++++++++++----------
include/linux/mlx5/driver.h | 2 +
2 files changed, 76 insertions(+), 52 deletions(-)
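
Note for reviewers (not part of the commit message): on 64-bit systems the
virtual extension is realized by handing vmap() the buffer's page list with
the first page appended once more, so the virtually contiguous mapping wraps
onto itself. A minimal standalone sketch of the idea, with the hypothetical
helper name vmap_with_wrap:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Map nbufs pages plus the first page once more at the end, so that
 * accesses running up to one page past the buffer end land back at its
 * beginning.
 */
static void *vmap_with_wrap(struct page **pages, int nbufs)
{
	struct page **tmp;
	void *va;

	tmp = kcalloc(nbufs + 1, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return NULL;

	memcpy(tmp, pages, nbufs * sizeof(*tmp));
	tmp[nbufs] = pages[0];		/* the virtual extension page */

	va = vmap(tmp, nbufs + 1, VM_MAP, PAGE_KERNEL);
	kfree(tmp);
	return va;
}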
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf..ffda222 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -47,70 +47,92 @@
* multiple pages, so we don't require too much contiguous memory.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf)
+static int mlx5_buf_alloc_direct(struct mlx5_core_dev *dev, int size,
+ struct mlx5_buf *buf)
{
dma_addr_t t;
- buf->size = size;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; i++) {
- buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; i++)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
+ buf->size = size;
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ buf->direct.map = t;
+
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
}
return 0;
+}
+
+int mlx5_buf_alloc_pages(struct mlx5_core_dev *dev, int size,
+ int virtual_extension, struct mlx5_buf *buf)
+{
+ dma_addr_t t;
+ int i;
+
+ buf->size = size;
+ buf->direct.buf = NULL;
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
+ buf->page_shift = PAGE_SHIFT;
+ buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
+ GFP_KERNEL);
+ if (!buf->page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; i++) {
+ buf->page_list[i].buf =
+ dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &t, GFP_KERNEL);
+ if (!buf->page_list[i].buf)
+ goto err_free;
+
+ buf->page_list[i].map = t;
+ }
+
+ if (BITS_PER_LONG == 64) {
+ struct page **pages;
+ int npages = buf->nbufs + (virtual_extension ? 1 : 0);
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+
+ for (i = 0; i < buf->nbufs; i++)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+
+ if (virtual_extension)
+ pages[buf->nbufs] = pages[0];
+
+ buf->direct.buf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ goto err_free;
+ }
+ return 0;
err_free:
mlx5_buf_free(dev, buf);
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(mlx5_buf_alloc_pages);
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
+ struct mlx5_buf *buf)
+{
+ if (size <= max_direct)
+ return mlx5_buf_alloc_direct(dev, size, buf);
+
+ return mlx5_buf_alloc_pages(dev, size, 0, buf);
+}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index fc45233..0c93745 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -676,6 +676,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
struct mlx5_buf *buf);
+int mlx5_buf_alloc_pages(struct mlx5_core_dev *dev, int size,
+ int virtual_extension, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
--
1.9.3