From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
Yevgeny Kliteynik <kliteyn@nvidia.com>,
Alex Vesker <valex@nvidia.com>
Subject: [net-next 10/14] net/mlx5: DR, Allocate icm_chunks from their own slab allocator
Date: Mon, 24 Oct 2022 14:57:30 +0100
Message-ID: <20221024135734.69673-11-saeed@kernel.org>
In-Reply-To: <20221024135734.69673-1-saeed@kernel.org>

From: Yevgeny Kliteynik <kliteyn@nvidia.com>
SW steering allocates and frees a lot of icm_chunk structs. To make this
more efficient, create a dedicated kmem_cache and allocate these chunks
from it.

With this change we observe that the alloc/free "hiccups" occur far less
frequently, which allows for a steadier rule insertion rate.
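
For reference, here is a minimal, self-contained sketch of the slab-cache
pattern this patch applies. The struct layout and the helper names below
are illustrative only, not the driver's actual code:

	#include <linux/slab.h>
	#include <linux/list.h>

	struct example_chunk {
		struct list_head chunk_list;
		u32 seg;
	};

	static struct kmem_cache *chunk_cache;

	static int example_cache_init(void)
	{
		/* One cache for all chunk objects; SLAB_HWCACHE_ALIGN keeps
		 * objects cache-line aligned to avoid false sharing.
		 */
		chunk_cache = kmem_cache_create("example_chunks",
						sizeof(struct example_chunk),
						0, SLAB_HWCACHE_ALIGN, NULL);
		return chunk_cache ? 0 : -ENOMEM;
	}

	static struct example_chunk *example_chunk_alloc(void)
	{
		/* Unlike kvzalloc(), kmem_cache_alloc() does not zero the
		 * object, so the caller must initialize every field.
		 */
		return kmem_cache_alloc(chunk_cache, GFP_KERNEL);
	}

	static void example_chunk_free(struct example_chunk *chunk)
	{
		kmem_cache_free(chunk_cache, chunk);
	}

	static void example_cache_exit(void)
	{
		/* All objects must have been freed before this point. */
		kmem_cache_destroy(chunk_cache);
	}

Note the trade-off visible in the diff below: the kvzalloc() call being
replaced returned zeroed memory, while kmem_cache_alloc() does not, so
every chunk field must be explicitly initialized after allocation.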
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
.../mellanox/mlx5/core/steering/dr_domain.c | 15 ++++++++++++++-
.../mellanox/mlx5/core/steering/dr_icm_pool.c | 11 +++++++++--
.../mellanox/mlx5/core/steering/dr_types.h | 1 +
3 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 3dc784b22741..3fbcb2883a26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -60,10 +60,19 @@ static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
{
int ret;
+ dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
+ sizeof(struct mlx5dr_icm_chunk), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dmn->chunks_kmem_cache) {
+ mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
+ return -ENOMEM;
+ }
+
dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
if (!dmn->ste_icm_pool) {
mlx5dr_err(dmn, "Couldn't get icm memory\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_chunks_kmem_cache;
}
dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
@@ -85,6 +94,9 @@ static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+free_chunks_kmem_cache:
+ kmem_cache_destroy(dmn->chunks_kmem_cache);
+
return ret;
}
@@ -93,6 +105,7 @@ static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
mlx5dr_send_info_pool_destroy(dmn);
mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ kmem_cache_destroy(dmn->chunks_kmem_cache);
}
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 7ca1ef073f55..be02546a7de0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -9,6 +9,8 @@ struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_domain *dmn;
+ struct kmem_cache *chunks_kmem_cache;
+
/* memory management */
struct mutex mutex; /* protect the ICM pool and ICM buddy */
struct list_head buddy_mem_list;
@@ -193,10 +195,13 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
{
+ struct kmem_cache *chunks_cache =
+ chunk->buddy_mem->pool->chunks_kmem_cache;
+
chunk->buddy_mem->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
list_del(&chunk->chunk_list);
- kvfree(chunk);
+ kmem_cache_free(chunks_cache, chunk);
}
static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
@@ -302,10 +307,11 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
unsigned int seg)
{
+ struct kmem_cache *chunks_cache = buddy_mem_pool->pool->chunks_kmem_cache;
struct mlx5dr_icm_chunk *chunk;
int offset;
- chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
+ chunk = kmem_cache_alloc(chunks_cache, GFP_KERNEL);
if (!chunk)
return NULL;
@@ -482,6 +488,7 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
pool->dmn = dmn;
pool->icm_type = icm_type;
pool->max_log_chunk_sz = max_log_chunk_sz;
+ pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
INIT_LIST_HEAD(&pool->buddy_mem_list);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 244685453a27..4f38f0f5b352 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -915,6 +915,7 @@ struct mlx5dr_domain {
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_info_pool *send_info_pool_rx;
struct mlx5dr_send_info_pool *send_info_pool_tx;
+ struct kmem_cache *chunks_kmem_cache;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
--
2.37.3