From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>
Cc: netdev@vger.kernel.org, Shay Drory <shayd@nvidia.com>,
Moshe Shemesh <moshe@nvidia.com>,
Saeed Mahameed <saeedm@nvidia.com>
Subject: [PATCH net-next 02/15] net/mlx5: Introduce control IRQ request API
Date: Thu, 6 Jan 2022 16:29:43 -0800
Message-ID: <20220107002956.74849-3-saeed@kernel.org>
In-Reply-To: <20220107002956.74849-1-saeed@kernel.org>

From: Shay Drory <shayd@nvidia.com>

Currently, the IRQ layer has a separate flow for ctrl and comp IRQs, and
the distinction between the two is made inside the IRQ layer itself.
To ease the coding and maintenance of the IRQ layer, introduce a new API
for requesting control IRQs, mlx5_ctrl_irq_request(struct mlx5_core_dev *dev),
so that callers request a control IRQ explicitly rather than the IRQ layer
inferring it from the vector index and affinity.
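
For illustration only, a minimal sketch of how a caller is expected to use
the two request paths (dev, vecidx and affinity are hypothetical stand-ins;
the real call site is the eq.c hunk below):

	struct mlx5_irq *ctrl_irq, *comp_irq;

	/* Control IRQ: no vector index or affinity argument needed */
	ctrl_irq = mlx5_ctrl_irq_request(dev);
	if (IS_ERR(ctrl_irq))
		return PTR_ERR(ctrl_irq);

	/* Completion IRQ: still requested per vector with an affinity hint */
	comp_irq = mlx5_irq_request(dev, vecidx, affinity);
	if (IS_ERR(comp_irq)) {
		mlx5_irq_release(ctrl_irq);
		return PTR_ERR(comp_irq);
	}
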
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/eq.c  |  5 +-
 .../ethernet/mellanox/mlx5/core/mlx5_irq.h    |  1 +
 .../net/ethernet/mellanox/mlx5/core/pci_irq.c | 62 ++++++++++++++++---
 3 files changed, 58 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index b695aad71ee1..1eb0326a489b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -289,7 +289,10 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
 	init_eq_buf(eq);
 
-	eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
+	if (vecidx == MLX5_IRQ_EQ_CTRL)
+		eq->irq = mlx5_ctrl_irq_request(dev);
+	else
+		eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
 	if (IS_ERR(eq->irq)) {
 		err = PTR_ERR(eq->irq);
 		goto err_buf;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 8116815663a7..7028e4b43837 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -22,6 +22,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
 			    int msix_vec_count);
 int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
 
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 				  struct cpumask *affinity);
 void mlx5_irq_release(struct mlx5_irq *irq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 163e01fe500e..510a9b91ff9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -396,23 +396,36 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 	if (IS_ERR(irq) || !affinity)
 		goto unlock;
 	cpumask_copy(irq->mask, affinity);
-	if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
-	    cpumask_empty(irq->mask))
-		cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
 	irq_set_affinity_hint(irq->irqn, irq->mask);
 unlock:
 	mutex_unlock(&pool->lock);
 	return irq;
 }
 
-static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
-					      int i, struct cpumask *affinity)
+static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
+{
+	return irq_table->sf_ctrl_pool;
+}
+
+static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
 {
-	if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
-		return irq_table->sf_ctrl_pool;
 	return irq_table->sf_comp_pool;
 }
 
+static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
+{
+	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+	struct mlx5_irq_pool *pool = NULL;
+
+	if (mlx5_core_is_sf(dev))
+		pool = sf_ctrl_irq_pool_get(irq_table);
+
+	/* In some configs there won't be a pool of SF IRQs. Hence, return
+	 * the PF IRQ pool in case the SF pool doesn't exist.
+	 */
+	return pool ? pool : irq_table->pf_pool;
+}
+
 /**
  * mlx5_irq_release - release an IRQ back to the system.
  * @irq: irq to be released.
@@ -423,6 +436,38 @@ void mlx5_irq_release(struct mlx5_irq *irq)
 	irq_put(irq);
 }
 
+/**
+ * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ *
+ * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
+ */
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
+{
+	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
+	cpumask_var_t req_mask;
+	struct mlx5_irq *irq;
+
+	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+	cpumask_copy(req_mask, cpu_online_mask);
+	if (!irq_pool_is_sf_pool(pool)) {
+		/* In case we are allocating a control IRQ for PF/VF */
+		if (!pool->xa_num_irqs.max) {
+			cpumask_clear(req_mask);
+			/* In case we only have a single IRQ for PF/VF */
+			cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+		}
+		/* Allocate the IRQ at the last index of the pool */
+		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+	} else {
+		irq = irq_pool_request_affinity(pool, req_mask);
+	}
+
+	free_cpumask_var(req_mask);
+	return irq;
+}
+
 /**
  * mlx5_irq_request - request an IRQ for mlx5 device.
  * @dev: mlx5 device that requesting the IRQ.
@@ -440,7 +485,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	struct mlx5_irq *irq;
 
 	if (mlx5_core_is_sf(dev)) {
-		pool = find_sf_irq_pool(irq_table, vecidx, affinity);
+		pool = sf_irq_pool_get(irq_table);
 		if (!pool)
 			/* we don't have IRQs for SFs, using the PF IRQs */
 			goto pf_irq;
@@ -453,7 +498,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	}
 pf_irq:
 	pool = irq_table->pf_pool;
-	vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
 	irq = irq_pool_request_vector(pool, vecidx, affinity);
 out:
 	if (IS_ERR(irq))
--
2.33.1

Thread overview: 17+ messages
2022-01-07 0:29 [pull request][net-next 00/15] mlx5 updates 2022-01-06 Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 01/15] net/mlx5: mlx5e_hv_vhca_stats_create return type to void Saeed Mahameed
2022-01-07 11:30 ` patchwork-bot+netdevbpf
2022-01-07 0:29 ` Saeed Mahameed [this message]
2022-01-07 0:29 ` [PATCH net-next 03/15] net/mlx5: Move affinity assignment into irq_request Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 04/15] net/mlx5: Split irq_pool_affinity logic to new file Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 05/15] net/mlx5: Introduce API for bulk request and release of IRQs Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 06/15] net/mlx5: SF, Use all available cpu for setting cpu affinity Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 07/15] net/mlx5: Update log_max_qp value to FW max capability Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 08/15] net/mlx5e: Expose FEC counters via ethtool Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 09/15] net/mlx5e: Unblock setting vid 0 for VF in case PF isn't eswitch manager Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 10/15] net/mlx5e: Fix feature check per profile Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 11/15] net/mlx5e: Move HW-GRO and CQE compression check to fix features flow Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 12/15] net/mlx5e: Refactor set_pflag_cqe_based_moder Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 13/15] net/mlx5e: TC, Remove redundant error logging Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 14/15] net/mlx5e: Add recovery flow in case of error CQE Saeed Mahameed
2022-01-07 0:29 ` [PATCH net-next 15/15] Documentation: devlink: mlx5.rst: Fix htmldoc build warning Saeed Mahameed