From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
Yevgeny Kliteynik <kliteyn@nvidia.com>,
Alex Vesker <valex@nvidia.com>
Subject: [net-next V2 14/14] net/mlx5: DR, Remove the buddy used_list
Date: Thu, 27 Oct 2022 15:56:43 +0100
Message-ID: <20221027145643.6618-15-saeed@kernel.org>
In-Reply-To: <20221027145643.6618-1-saeed@kernel.org>

From: Yevgeny Kliteynik <kliteyn@nvidia.com>

There is no need for the used_list: we don't need to keep track of the
used chunks themselves, only of the amount of used memory.
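
Purely as an illustration of the bookkeeping that remains (a standalone
sketch, not the driver code - every identifier in it is made up):

/* Toy model: once the per-chunk list is gone, the buddy only has to
 * maintain a byte counter, bumped on chunk init and dropped on release.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_buddy {
	uint64_t used_memory;		/* bytes currently handed out */
};

struct toy_chunk {
	struct toy_buddy *buddy;
	uint64_t byte_size;
};

static void toy_chunk_init(struct toy_chunk *c, struct toy_buddy *b,
			   uint64_t byte_size)
{
	c->buddy = b;
	c->byte_size = byte_size;
	b->used_memory += byte_size;	/* previously: also list_add_tail() */
}

static void toy_chunk_release(struct toy_chunk *c)
{
	c->buddy->used_memory -= c->byte_size;	/* previously: also list_del() */
}

int main(void)
{
	struct toy_buddy buddy = { .used_memory = 0 };
	struct toy_chunk a, b;

	toy_chunk_init(&a, &buddy, 4096);
	toy_chunk_init(&b, &buddy, 8192);
	toy_chunk_release(&a);
	printf("used_memory = %llu bytes\n",
	       (unsigned long long)buddy.used_memory);	/* prints 8192 */
	return 0;
}
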
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
.../mellanox/mlx5/core/steering/dr_buddy.c | 1 -
.../mellanox/mlx5/core/steering/dr_icm_pool.c | 50 +++++--------------
.../mellanox/mlx5/core/steering/dr_types.h | 1 -
.../mellanox/mlx5/core/steering/mlx5dr.h | 3 +-
4 files changed, 13 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
index 7e30dc64c10c..fe228d948b47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
@@ -15,7 +15,6 @@ int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
buddy->max_order = max_order;
INIT_LIST_HEAD(&buddy->list_node);
- INIT_LIST_HEAD(&buddy->used_list);
buddy->bitmap = kcalloc(buddy->max_order + 1,
sizeof(*buddy->bitmap),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index c31608a3f11c..3eb6719bc8eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -207,17 +207,6 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
num_of_entries * sizeof(chunk->ste_arr[0]));
}
-static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
-{
- struct kmem_cache *chunks_cache =
- chunk->buddy_mem->pool->chunks_kmem_cache;
-
- chunk->buddy_mem->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- list_del(&chunk->chunk_list);
-
- kmem_cache_free(chunks_cache, chunk);
-}
-
static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
int num_of_entries =
@@ -297,11 +286,6 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
- struct mlx5dr_icm_chunk *chunk, *next;
-
- list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
- dr_icm_chunk_destroy(chunk);
-
dr_icm_pool_mr_destroy(buddy->icm_mr);
mlx5dr_buddy_cleanup(buddy);
@@ -312,36 +296,25 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
kvfree(buddy);
}
-static struct mlx5dr_icm_chunk *
-dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
- enum mlx5dr_icm_chunk_size chunk_size,
- struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
- unsigned int seg)
+static void
+dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
+ struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+ unsigned int seg)
{
- struct kmem_cache *chunks_cache = buddy_mem_pool->pool->chunks_kmem_cache;
- struct mlx5dr_icm_chunk *chunk;
int offset;
- chunk = kmem_cache_alloc(chunks_cache, GFP_KERNEL);
- if (!chunk)
- return NULL;
-
- offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
-
chunk->seg = seg;
chunk->size = chunk_size;
chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE)
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
dr_icm_chunk_ste_init(chunk, offset);
+ }
buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- INIT_LIST_HEAD(&chunk->chunk_list);
-
- /* chunk now is part of the used_list */
- list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
-
- return chunk;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
@@ -463,10 +436,12 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
if (ret)
goto out;
- chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
+ chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
if (!chunk)
goto out_err;
+ dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);
+
goto out;
out_err:
@@ -495,7 +470,6 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
hot_chunk->seg = chunk->seg;
hot_chunk->size = chunk->size;
- list_del(&chunk->chunk_list);
kmem_cache_free(chunks_cache, chunk);
/* Check if we have chunks that are waiting for sync-ste */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index bd2c3073591e..41a37b9ac98b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1111,7 +1111,6 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
- struct list_head chunk_list;
/* indicates the index of this chunk in the whole memory,
* used for deleting the chunk from the buddy
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 674efb3607b1..84ed77763b21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -164,8 +164,7 @@ struct mlx5dr_icm_buddy_mem {
struct mlx5dr_icm_mr *icm_mr;
struct mlx5dr_icm_pool *pool;
- /* This is the list of used chunks. HW may be accessing this memory */
- struct list_head used_list;
+ /* Amount of memory in used chunks - HW may be accessing this memory */
u64 used_memory;
/* Memory optimisation */
--
2.37.3