From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
	netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
	Moshe Tal <moshet@nvidia.com>,
	Maxim Mikityanskiy <maximmi@nvidia.com>
Subject: [net-next V2 06/13] net/mlx5e: HTB, move stats and max_sqs to priv
Date: Tue, 19 Jul 2022 13:35:22 -0700
Message-ID: <20220719203529.51151-7-saeed@kernel.org>
In-Reply-To: <20220719203529.51151-1-saeed@kernel.org>

From: Moshe Tal <moshet@nvidia.com>

Preparation for dynamic allocation of the HTB struct: move the QoS SQ
statistics array and the max_qos_sqs counter out of struct mlx5e_htb and
into struct mlx5e_priv (as htb_qos_sq_stats and htb_max_qos_sqs), so the
statistics are preserved even after the HTB struct is de-allocated. The
existing smp_store_release/smp_load_acquire pairing between the writer and
the stats readers is kept as-is; a minimal sketch of that publish pattern
follows.
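For readers unfamiliar with the release/acquire pairing the moved fields rely
on, here is a minimal, self-contained userspace sketch. It uses C11 atomics
as a stand-in for the kernel's smp_store_release()/smp_load_acquire() and
WRITE_ONCE()/READ_ONCE(); all names (publish_slot, read_all, slots, nr_slots)
are hypothetical illustrations, not part of the driver:

	/* Single writer grows an array of per-SQ stats; readers may run
	 * concurrently. The writer fills the new slot first, then publishes
	 * the grown count with a release store. A reader that acquires the
	 * count is guaranteed to see every slot below it fully initialized.
	 */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct sq_stats { unsigned long packets; };

	static struct sq_stats *slots[64];        /* analogue of htb_qos_sq_stats */
	static _Atomic unsigned short nr_slots;   /* analogue of htb_max_qos_sqs  */

	static int publish_slot(unsigned long packets)
	{
		unsigned short qid =
			atomic_load_explicit(&nr_slots, memory_order_relaxed);
		struct sq_stats *s = calloc(1, sizeof(*s));

		if (!s)
			return -1;
		s->packets = packets;
		slots[qid] = s;           /* plain store, like WRITE_ONCE() */
		/* Pairs with the acquire load in read_all(),
		 * like smp_store_release() in mlx5e_open_qos_sq().
		 */
		atomic_store_explicit(&nr_slots, qid + 1, memory_order_release);
		return 0;
	}

	static unsigned long read_all(void)
	{
		/* Pairs with the release store above,
		 * like smp_load_acquire() in en_stats.c.
		 */
		unsigned short n =
			atomic_load_explicit(&nr_slots, memory_order_acquire);
		unsigned long total = 0;

		for (unsigned short i = 0; i < n; i++)
			total += slots[i]->packets;
		return total;
	}

	int main(void)
	{
		publish_slot(10);
		publish_slot(32);
		printf("total packets: %lu\n", read_all());
		return 0;
	}

Moving the two fields to priv does not change this ordering contract; only
the location (and names) of the published pointer array and counter change.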

Signed-off-by: Moshe Tal <moshet@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h     |  4 ++--
 drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 16 ++++++++--------
 .../net/ethernet/mellanox/mlx5/core/en_main.c    |  6 +++---
 .../net/ethernet/mellanox/mlx5/core/en_stats.c   | 12 ++++++------
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1222156e222b..d2ed27575097 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -902,8 +902,6 @@ struct mlx5e_scratchpad {
 struct mlx5e_htb {
 	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
 	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
-	struct mlx5e_sq_stats **qos_sq_stats;
-	u16 max_qos_sqs;
 };
 
 struct mlx5e_trap;
@@ -944,6 +942,8 @@ struct mlx5e_priv {
 	struct mlx5e_channel_stats **channel_stats;
 	struct mlx5e_channel_stats trap_stats;
 	struct mlx5e_ptp_stats     ptp_stats;
+	struct mlx5e_sq_stats      **htb_qos_sq_stats;
+	u16                        htb_max_qos_sqs;
 	u16                        stats_nch;
 	u16                        max_nch;
 	u8                         max_opened_tc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 9a61c44e7f72..6136cad397dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -213,11 +213,11 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
 
 	txq_ix = mlx5e_qid_from_qos(chs, node->qid);
 
-	WARN_ON(node->qid > priv->htb.max_qos_sqs);
-	if (node->qid == priv->htb.max_qos_sqs) {
+	WARN_ON(node->qid > priv->htb_max_qos_sqs);
+	if (node->qid == priv->htb_max_qos_sqs) {
 		struct mlx5e_sq_stats *stats, **stats_list = NULL;
 
-		if (priv->htb.max_qos_sqs == 0) {
+		if (priv->htb_max_qos_sqs == 0) {
 			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
 					      sizeof(*stats_list),
 					      GFP_KERNEL);
@@ -230,12 +230,12 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
 			return -ENOMEM;
 		}
 		if (stats_list)
-			WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
-		WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
-		/* Order max_qos_sqs increment after writing the array pointer.
+			WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+		WRITE_ONCE(priv->htb_qos_sq_stats[node->qid], stats);
+		/* Order htb_max_qos_sqs increment after writing the array pointer.
 		 * Pairs with smp_load_acquire in en_stats.c.
 		 */
-		smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+		smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
 	}
 
 	ix = node->qid % params->num_channels;
@@ -259,7 +259,7 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
 		goto err_free_sq;
 	err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
 			       &param_sq, sq, 0, node->hw_id,
-			       priv->htb.qos_sq_stats[node->qid]);
+			       priv->htb_qos_sq_stats[node->qid]);
 	if (err)
 		goto err_close_cq;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 700bca033769..fed24b5a0bae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5372,9 +5372,9 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 	mutex_unlock(&priv->state_lock);
 	free_cpumask_var(priv->scratchpad.cpumask);
 
-	for (i = 0; i < priv->htb.max_qos_sqs; i++)
-		kfree(priv->htb.qos_sq_stats[i]);
-	kvfree(priv->htb.qos_sq_stats);
+	for (i = 0; i < priv->htb_max_qos_sqs; i++)
+		kfree(priv->htb_qos_sq_stats[i]);
+	kvfree(priv->htb_qos_sq_stats);
 
 	memset(priv, 0, sizeof(*priv));
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e87bb2b7541..1a88406ee6d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
 	int i;
 
 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-	stats = READ_ONCE(priv->htb.qos_sq_stats);
+	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+	stats = READ_ONCE(priv->htb_qos_sq_stats);
 
 	for (i = 0; i < max_qos_sqs; i++) {
 		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -2184,13 +2184,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
 {
 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
 {
 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
 	int i, qid;
 
 	for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2208,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
 	int i, qid;
 
 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-	stats = READ_ONCE(priv->htb.qos_sq_stats);
+	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+	stats = READ_ONCE(priv->htb_qos_sq_stats);
 
 	for (qid = 0; qid < max_qos_sqs; qid++) {
 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
-- 
2.36.1

