From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
Maher Sanalla <msanalla@nvidia.com>,
Shay Drory <shayd@nvidia.com>, Moshe Shemesh <moshe@nvidia.com>
Subject: [net-next 01/15] net/mlx5: Track the current number of completion EQs
Date: Mon, 7 Aug 2023 10:56:28 -0700
Message-ID: <20230807175642.20834-2-saeed@kernel.org>
In-Reply-To: <20230807175642.20834-1-saeed@kernel.org>

From: Maher Sanalla <msanalla@nvidia.com>

In preparation for allocating completion EQs dynamically, add a counter
to track the number of completion EQs currently allocated, and store the
maximum number of completion EQs in the max_comp_eqs variable.

Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
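[ Reviewer note, not part of the commit message: a minimal sketch of the
counting scheme this patch introduces, reusing the field names added in
the diff below. The bound check is an assumption about how the per-EQ
create/destroy added later in this series is expected to use
max_comp_eqs; it is not code from this patch. ]

#include <linux/errno.h>

/* Sketch only: curr_comp_eqs counts live completion EQs,
 * max_comp_eqs is the ceiling taken from get_num_eqs().
 */
struct eq_table_sketch {
	int curr_comp_eqs;	/* incremented on EQ create, decremented on destroy */
	int max_comp_eqs;	/* upper bound on completion EQs */
};

static int sketch_create_comp_eq(struct eq_table_sketch *t)
{
	if (t->curr_comp_eqs >= t->max_comp_eqs)
		return -ENOSPC;	/* assumed policy once EQs are created on demand */
	/* ... allocate and arm the EQ here ... */
	t->curr_comp_eqs++;
	return 0;
}

static void sketch_destroy_comp_eq(struct eq_table_sketch *t)
{
	/* ... disarm and free the EQ here ... */
	t->curr_comp_eqs--;
}
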
drivers/net/ethernet/mellanox/mlx5/core/eq.c | 24 ++++++++++++--------
1 file changed, 14 insertions(+), 10 deletions(-)
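
[ Also for context: mlx5_comp_vectors_count(), updated in the diff below,
continues to report the completion vector budget, now taken from
max_comp_eqs. A minimal, hypothetical caller sketch follows; the helper
name and header paths are assumptions, not taken from this series. ]

#include <linux/slab.h>
#include <linux/mlx5/driver.h>

/* Hypothetical consumer: size a per-vector array from the reported maximum. */
static void **alloc_per_vector_state(struct mlx5_core_dev *mdev)
{
	unsigned int nvec = mlx5_comp_vectors_count(mdev); /* max_comp_eqs after this patch */

	return kcalloc(nvec, sizeof(void *), GFP_KERNEL);
}
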
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 3db4866d7880..66257f7879b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -58,7 +58,8 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
- int num_comp_eqs;
+ int curr_comp_eqs;
+ int max_comp_eqs;
struct mlx5_irq_table *irq_table;
struct mlx5_irq **comp_irqs;
struct mlx5_irq *ctrl_irq;
@@ -452,6 +453,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = mlx5_irq_table_get(dev);
+ eq_table->curr_comp_eqs = 0;
return 0;
}
@@ -807,7 +809,7 @@ static void comp_irqs_release_pci(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+ mlx5_irqs_release_vectors(table->comp_irqs, table->max_comp_eqs);
}
static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
@@ -821,7 +823,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
int cpu;
int i;
- ncomp_eqs = table->num_comp_eqs;
+ ncomp_eqs = table->max_comp_eqs;
cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
if (!cpus)
return -ENOMEM;
@@ -847,13 +849,13 @@ static void comp_irqs_release_sf(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
+ mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->max_comp_eqs);
}
static int comp_irqs_request_sf(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- int ncomp_eqs = table->num_comp_eqs;
+ int ncomp_eqs = table->max_comp_eqs;
return mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
}
@@ -874,7 +876,7 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
int ncomp_eqs;
int ret;
- ncomp_eqs = table->num_comp_eqs;
+ ncomp_eqs = table->max_comp_eqs;
table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
if (!table->comp_irqs)
return -ENOMEM;
@@ -901,7 +903,7 @@ static int alloc_rmap(struct mlx5_core_dev *mdev)
if (mlx5_core_is_sf(mdev))
return 0;
- eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
if (!eq_table->rmap)
return -ENOMEM;
return 0;
@@ -934,6 +936,7 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
eq->core.eqn);
tasklet_disable(&eq->tasklet_ctx.task);
kfree(eq);
+ table->curr_comp_eqs--;
}
comp_irqs_release(dev);
free_rmap(dev);
@@ -973,6 +976,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
goto err_irqs_req;
}
+ table->max_comp_eqs = ncomp_eqs;
INIT_LIST_HEAD(&table->comp_eqs_list);
nent = comp_eq_depth_devlink_param_get(dev);
@@ -1008,9 +1012,9 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
list_add_tail(&eq->list, &table->comp_eqs_list);
+ table->curr_comp_eqs++;
}
- table->num_comp_eqs = ncomp_eqs;
return 0;
clean_eq:
@@ -1057,7 +1061,7 @@ int mlx5_vector2irqn(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
- return dev->priv.eq_table->num_comp_eqs;
+ return dev->priv.eq_table->max_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);
@@ -1148,7 +1152,7 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- eq_table->num_comp_eqs = get_num_eqs(dev);
+ eq_table->max_comp_eqs = get_num_eqs(dev);
err = create_async_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create async EQs\n");
--
2.41.0