From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
	netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
	Jianbo Liu <jianbol@nvidia.com>
Subject: [net-next 13/15] net/mlx5: Add async garbage collector for DEK bulk
Date: Mon, 30 Jan 2023 19:11:59 -0800
Message-ID: <20230131031201.35336-14-saeed@kernel.org>
In-Reply-To: <20230131031201.35336-1-saeed@kernel.org>

From: Jianbo Liu <jianbol@nvidia.com>

After invalidation, an idle bulk (one whose DEKs are all available for
use) is destroyed to free the keys and the memory.

To improve performance, the firmware destruction is done
asynchronously: idle bulks are first enqueued on destroy_list and
later freed from the system workqueue, so the destruction does not
need to hold the pool's mutex.
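
For illustration only, below is a minimal, driver-agnostic sketch of
the deferred-free pattern this patch applies: a spinlock-protected
destroy list plus a system-workqueue worker that splices the list and
frees the entries. All names here (retire_ctx, retired_obj,
retire_obj(), retire_work_fn()) are hypothetical and not part of the
mlx5 driver:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct retired_obj {
		struct list_head entry;
		/* ... payload ... */
	};

	struct retire_ctx {
		spinlock_t lock;		/* protects list */
		struct list_head list;		/* objects awaiting free */
		struct work_struct work;	/* runs on the system workqueue */
	};

	static void retire_work_fn(struct work_struct *work)
	{
		struct retire_ctx *ctx = container_of(work, struct retire_ctx, work);
		struct retired_obj *obj, *tmp;
		LIST_HEAD(local);

		/* Take all pending entries in one shot, then free them
		 * without holding any lock.
		 */
		spin_lock(&ctx->lock);
		list_splice_init(&ctx->list, &local);
		spin_unlock(&ctx->lock);

		list_for_each_entry_safe(obj, tmp, &local, entry)
			kfree(obj);	/* stand-in for the real bulk free */
	}

	static void retire_ctx_init(struct retire_ctx *ctx)
	{
		spin_lock_init(&ctx->lock);
		INIT_LIST_HEAD(&ctx->list);
		INIT_WORK(&ctx->work, retire_work_fn);
	}

	static void retire_obj(struct retire_ctx *ctx, struct retired_obj *obj)
	{
		spin_lock(&ctx->lock);
		list_add(&obj->entry, &ctx->list);
		spin_unlock(&ctx->lock);
		schedule_work(&ctx->work);	/* defer the actual free */
	}

Splicing under the spinlock keeps the critical section tiny; the
actual destruction (stood in for by kfree() above) then runs without
the pool mutex held, which is where the performance win comes from.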

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../ethernet/mellanox/mlx5/core/lib/crypto.c  | 74 ++++++++++++++++---
 1 file changed, 65 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index 8f8c18f80601..e078530ef37d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -46,6 +46,10 @@ struct mlx5_crypto_dek_pool {
 	bool syncing;
 	struct list_head wait_for_free;
 	struct work_struct sync_work;
+
+	spinlock_t destroy_lock; /* protect destroy_list */
+	struct list_head destroy_list;
+	struct work_struct destroy_work;
 };
 
 struct mlx5_crypto_dek_bulk {
@@ -351,13 +355,15 @@ static void mlx5_crypto_dek_bulk_free(struct mlx5_crypto_dek_bulk *bulk)
 }
 
 static void mlx5_crypto_dek_pool_remove_bulk(struct mlx5_crypto_dek_pool *pool,
-					     struct mlx5_crypto_dek_bulk *bulk)
+					     struct mlx5_crypto_dek_bulk *bulk,
+					     bool delay)
 {
 	pool->num_deks -= bulk->num_deks;
 	pool->avail_deks -= bulk->avail_deks;
 	pool->in_use_deks -= bulk->in_use_deks;
 	list_del(&bulk->entry);
-	mlx5_crypto_dek_bulk_free(bulk);
+	if (!delay)
+		mlx5_crypto_dek_bulk_free(bulk);
 }
 
 static struct mlx5_crypto_dek_bulk *
@@ -500,6 +506,23 @@ static void mlx5_crypto_dek_bulk_reset_synced(struct mlx5_crypto_dek_pool *pool,
 	}
 }
 
+static void mlx5_crypto_dek_bulk_handle_avail(struct mlx5_crypto_dek_pool *pool,
+					      struct mlx5_crypto_dek_bulk *bulk,
+					      struct list_head *destroy_list)
+{
+	mlx5_crypto_dek_pool_remove_bulk(pool, bulk, true);
+	list_add(&bulk->entry, destroy_list);
+}
+
+static void mlx5_crypto_dek_pool_splice_destroy_list(struct mlx5_crypto_dek_pool *pool,
+						     struct list_head *list,
+						     struct list_head *head)
+{
+	spin_lock(&pool->destroy_lock);
+	list_splice_init(list, head);
+	spin_unlock(&pool->destroy_lock);
+}
+
 static void mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *pool)
 {
 	struct mlx5_crypto_dek *dek, *next;
@@ -512,16 +535,18 @@ static void mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *poo
 
 /* For all the bulks in each list, reset the bits while sync.
  * Move them to different lists according to the number of available DEKs.
+ * Destroy all the idle bulks for now.
  * And free DEKs in the waiting list at the end of this func.
  */
 static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
 {
 	struct mlx5_crypto_dek_bulk *bulk, *tmp;
+	LIST_HEAD(destroy_list);
 
 	list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry) {
 		mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
 		if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
-			list_move(&bulk->entry, &pool->avail_list);
+			mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
 	}
 
 	list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry) {
@@ -531,7 +556,7 @@ static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
 			continue;
 
 		if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
-			list_move(&bulk->entry, &pool->avail_list);
+			mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
 		else
 			list_move(&bulk->entry, &pool->partial_list);
 	}
@@ -541,10 +566,16 @@ static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
 		bulk->avail_start = 0;
 		bulk->avail_deks = bulk->num_deks;
 		pool->avail_deks += bulk->num_deks;
+		mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
 	}
-	list_splice_init(&pool->sync_list, &pool->avail_list);
 
 	mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+	if (!list_empty(&destroy_list)) {
+		mlx5_crypto_dek_pool_splice_destroy_list(pool, &destroy_list,
+							 &pool->destroy_list);
+		schedule_work(&pool->destroy_work);
+	}
 }
 
 static void mlx5_crypto_dek_sync_work_fn(struct work_struct *work)
@@ -620,6 +651,25 @@ void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
 	}
 }
 
+static void mlx5_crypto_dek_free_destroy_list(struct list_head *destroy_list)
+{
+	struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+	list_for_each_entry_safe(bulk, tmp, destroy_list, entry)
+		mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static void mlx5_crypto_dek_destroy_work_fn(struct work_struct *work)
+{
+	struct mlx5_crypto_dek_pool *pool =
+		container_of(work, struct mlx5_crypto_dek_pool, destroy_work);
+	LIST_HEAD(destroy_list);
+
+	mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list,
+						 &destroy_list);
+	mlx5_crypto_dek_free_destroy_list(&destroy_list);
+}
+
 struct mlx5_crypto_dek_pool *
 mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev, int key_purpose)
 {
@@ -639,6 +689,9 @@ mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev, int key_purpose)
 	INIT_LIST_HEAD(&pool->sync_list);
 	INIT_LIST_HEAD(&pool->wait_for_free);
 	INIT_WORK(&pool->sync_work, mlx5_crypto_dek_sync_work_fn);
+	spin_lock_init(&pool->destroy_lock);
+	INIT_LIST_HEAD(&pool->destroy_list);
+	INIT_WORK(&pool->destroy_work, mlx5_crypto_dek_destroy_work_fn);
 
 	return pool;
 }
@@ -648,20 +701,23 @@ void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool)
 	struct mlx5_crypto_dek_bulk *bulk, *tmp;
 
 	cancel_work_sync(&pool->sync_work);
+	cancel_work_sync(&pool->destroy_work);
 
 	mlx5_crypto_dek_pool_free_wait_keys(pool);
 
 	list_for_each_entry_safe(bulk, tmp, &pool->avail_list, entry)
-		mlx5_crypto_dek_pool_remove_bulk(pool, bulk);
+		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
 
 	list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry)
-		mlx5_crypto_dek_pool_remove_bulk(pool, bulk);
+		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
 
 	list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry)
-		mlx5_crypto_dek_pool_remove_bulk(pool, bulk);
+		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
 
 	list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry)
-		mlx5_crypto_dek_pool_remove_bulk(pool, bulk);
+		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+	mlx5_crypto_dek_free_destroy_list(&pool->destroy_list);
 
 	mutex_destroy(&pool->lock);
 
-- 
2.39.1



Thread overview: 17+ messages
2023-01-31  3:11 [pull request][net-next 00/15] mlx5 updates 2023-01-30 Saeed Mahameed
2023-01-31  3:11 ` [net-next 01/15] net/mlx5: Header file for crypto Saeed Mahameed
2023-02-01  5:50   ` patchwork-bot+netdevbpf
2023-01-31  3:11 ` [net-next 02/15] net/mlx5: Add IFC bits for general obj create param Saeed Mahameed
2023-01-31  3:11 ` [net-next 03/15] net/mlx5: Add IFC bits and enums for crypto key Saeed Mahameed
2023-01-31  3:11 ` [net-next 04/15] net/mlx5: Change key type to key purpose Saeed Mahameed
2023-01-31  3:11 ` [net-next 05/15] net/mlx5: Prepare for fast crypto key update if hardware supports it Saeed Mahameed
2023-01-31  3:11 ` [net-next 06/15] net/mlx5: Add const to the key pointer of encryption key creation Saeed Mahameed
2023-01-31  3:11 ` [net-next 07/15] net/mlx5: Refactor the " Saeed Mahameed
2023-01-31  3:11 ` [net-next 08/15] net/mlx5: Add new APIs for fast update encryption key Saeed Mahameed
2023-01-31  3:11 ` [net-next 09/15] net/mlx5: Add support SYNC_CRYPTO command Saeed Mahameed
2023-01-31  3:11 ` [net-next 10/15] net/mlx5: Add bulk allocation and modify_dek operation Saeed Mahameed
2023-01-31  3:11 ` [net-next 11/15] net/mlx5: Use bulk allocation for fast update encryption key Saeed Mahameed
2023-01-31  3:11 ` [net-next 12/15] net/mlx5: Reuse DEKs after executing SYNC_CRYPTO command Saeed Mahameed
2023-01-31  3:11 ` Saeed Mahameed [this message]
2023-01-31  3:12 ` [net-next 14/15] net/mlx5: Keep only one bulk of full available DEKs Saeed Mahameed
2023-01-31  3:12 ` [net-next 15/15] net/mlx5e: kTLS, Improve connection rate by using fast update encryption key Saeed Mahameed
