From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
	netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
	Maher Sanalla <msanalla@nvidia.com>,
	Shay Drory <shayd@nvidia.com>, Moshe Shemesh <moshe@nvidia.com>
Subject: [net-next 05/15] net/mlx5: Use xarray to store and manage completion EQs
Date: Mon,  7 Aug 2023 10:56:32 -0700
Message-ID: <20230807175642.20834-6-saeed@kernel.org>
In-Reply-To: <20230807175642.20834-1-saeed@kernel.org>

From: Maher Sanalla <msanalla@nvidia.com>

Use an xarray to store the completion EQs instead of a linked list.
The xarray scales better, reduces memory overhead, and allows an EQ to
be looked up directly by its vector index instead of walking a list.
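
A minimal sketch of the xarray pattern this patch adopts (the demo_*
names and the standalone xarray below are illustrative only, not part
of the driver; struct mlx5_eq_comp is assumed to come from the mlx5
headers):

	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_comp_eqs);	/* vector index -> EQ pointer */

	static int demo_store_eq(unsigned long vecidx, struct mlx5_eq_comp *eq)
	{
		/* xa_store() returns the previous entry or an xa_err()-encoded
		 * errno; GFP_KERNEL permits sleeping allocation of tree nodes.
		 */
		return xa_err(xa_store(&demo_comp_eqs, vecidx, eq, GFP_KERNEL));
	}

	static struct mlx5_eq_comp *demo_lookup_eq(unsigned long vecidx)
	{
		/* direct lookup by index, replacing the old linear list walk */
		return xa_load(&demo_comp_eqs, vecidx);
	}

	static void demo_destroy_eqs(void)
	{
		struct mlx5_eq_comp *eq;
		unsigned long index;

		/* iterate and drop every entry, as destroy_comp_eqs() does */
		xa_for_each(&demo_comp_eqs, index, eq)
			xa_erase(&demo_comp_eqs, index);
	}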

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 50 ++++++++++----------
 1 file changed, 24 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c01a5d8dbe9b..b343c0fd621e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -47,7 +47,7 @@ enum {
 static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
 
 struct mlx5_eq_table {
-	struct list_head        comp_eqs_list;
+	struct xarray           comp_eqs;
 	struct mlx5_eq_async    pages_eq;
 	struct mlx5_eq_async    cmd_eq;
 	struct mlx5_eq_async    async_eq;
@@ -455,6 +455,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
 
 	eq_table->irq_table = mlx5_irq_table_get(dev);
 	cpumask_clear(&eq_table->used_cpus);
+	xa_init(&eq_table->comp_eqs);
 	xa_init(&eq_table->comp_irqs);
 	eq_table->curr_comp_eqs = 0;
 	return 0;
@@ -466,6 +467,7 @@ void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
 
 	mlx5_eq_debugfs_cleanup(dev);
 	xa_destroy(&table->comp_irqs);
+	xa_destroy(&table->comp_eqs);
 	kvfree(table);
 }
 
@@ -928,12 +930,12 @@ static void free_rmap(struct mlx5_core_dev *mdev) {}
 static void destroy_comp_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
-	struct mlx5_eq_comp *eq, *n;
+	struct mlx5_eq_comp *eq;
 	struct mlx5_irq *irq;
 	unsigned long index;
 
-	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
-		list_del(&eq->list);
+	xa_for_each(&table->comp_eqs, index, eq) {
+		xa_erase(&table->comp_eqs, index);
 		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
 		if (destroy_unmap_eq(dev, &eq->core))
 			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
@@ -988,7 +990,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 		goto err_irqs_req;
 
 	table->max_comp_eqs = vecidx;
-	INIT_LIST_HEAD(&table->comp_eqs_list);
 	nent = comp_eq_depth_devlink_param_get(dev);
 
 	xa_for_each(&table->comp_irqs, index, irq)
@@ -1022,13 +1023,16 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 		}
 
 		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
-		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
-		list_add_tail(&eq->list, &table->comp_eqs_list);
+		err = xa_err(xa_store(&table->comp_eqs, index, eq, GFP_KERNEL));
+		if (err)
+			goto disable_eq;
 		table->curr_comp_eqs++;
 	}
 
 	return 0;
 
+disable_eq:
+	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
 clean_eq:
 	kfree(eq);
 clean:
@@ -1043,21 +1047,16 @@ static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 	struct mlx5_eq_comp *eq;
-	int err = -ENOENT;
-	int i = 0;
 
-	list_for_each_entry(eq, &table->comp_eqs_list, list) {
-		if (i++ == vector) {
-			if (irqn)
-				*irqn = eq->core.irqn;
-			if (eqn)
-				*eqn = eq->core.eqn;
-			err = 0;
-			break;
-		}
-	}
+	eq = xa_load(&table->comp_eqs, vector);
+	if (!eq)
+		return -ENOENT;
 
-	return err;
+	if (irqn)
+		*irqn = eq->core.irqn;
+	if (eqn)
+		*eqn = eq->core.eqn;
+	return 0;
 }
 
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn)
@@ -1082,12 +1081,10 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 	struct mlx5_eq_comp *eq;
-	int i = 0;
 
-	list_for_each_entry(eq, &table->comp_eqs_list, list) {
-		if (i++ == vector)
-			return mlx5_irq_get_affinity_mask(eq->core.irq);
-	}
+	eq = xa_load(&table->comp_eqs, vector);
+	if (eq)
+		return mlx5_irq_get_affinity_mask(eq->core.irq);
 
 	WARN_ON_ONCE(1);
 	return NULL;
@@ -1105,8 +1102,9 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
 {
 	struct mlx5_eq_table *table = dev->priv.eq_table;
 	struct mlx5_eq_comp *eq;
+	unsigned long index;
 
-	list_for_each_entry(eq, &table->comp_eqs_list, list) {
+	xa_for_each(&table->comp_eqs, index, eq) {
 		if (eq->core.eqn == eqn)
 			return eq;
 	}
-- 
2.41.0

