From: Mark Bloch <mbloch@nvidia.com>
To: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>,
"Andrew Lunn" <andrew+netdev@lunn.ch>
Cc: <saeedm@nvidia.com>, <gal@nvidia.com>, <leonro@nvidia.com>,
<tariqt@nvidia.com>, Leon Romanovsky <leon@kernel.org>,
Simon Horman <horms@kernel.org>,
Richard Cochran <richardcochran@gmail.com>,
"Alexei Starovoitov" <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
<netdev@vger.kernel.org>, <linux-rdma@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bpf@vger.kernel.org>,
Dragos Tatulea <dtatulea@nvidia.com>,
Cosmin Ratiu <cratiu@nvidia.com>, Mark Bloch <mbloch@nvidia.com>
Subject: [PATCH net-next v3 07/12] net/mlx5e: SHAMPO: Headers page pool stats
Date: Mon, 9 Jun 2025 17:58:28 +0300
Message-ID: <20250609145833.990793-8-mbloch@nvidia.com>
In-Reply-To: <20250609145833.990793-1-mbloch@nvidia.com>
From: Saeed Mahameed <saeedm@nvidia.com>

Expose the statistics of the new SHAMPO headers page pool, mirroring the
rx_pp_* counters already exposed for the regular RX page pool.
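For illustration only, with these counters in place the aggregate header
pool statistics would show up in ethtool -S output roughly as below (the
interface name and the values are placeholders, not output from a real
run). The per-queue counterparts added to rq_stats_desc are reported per
RX queue in the same way.

  $ ethtool -S eth0 | grep rx_pp_hd_
       rx_pp_hd_alloc_fast: 1034217
       rx_pp_hd_alloc_slow: 412
       rx_pp_hd_alloc_slow_high_order: 0
       rx_pp_hd_alloc_empty: 3
       rx_pp_hd_alloc_refill: 287
       rx_pp_hd_alloc_waive: 0
       rx_pp_hd_recycle_cached: 987654
       rx_pp_hd_recycle_cache_full: 120
       rx_pp_hd_recycle_ring: 45311
       rx_pp_hd_recycle_ring_full: 18
       rx_pp_hd_recycle_released_ref: 0
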
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
---
.../ethernet/mellanox/mlx5/core/en_stats.c | 54 +++++++++++++++++++
.../ethernet/mellanox/mlx5/core/en_stats.h | 24 +++++++++
2 files changed, 78 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 19664fa7f217..8422afbfa419 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -205,6 +205,18 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
+
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_fast) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_slow) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_slow_high_order) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_refill) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_alloc_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_cached) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_ring) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_ring_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_hd_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -384,6 +396,18 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
+
+ s->rx_pp_hd_alloc_fast += rq_stats->pp_hd_alloc_fast;
+ s->rx_pp_hd_alloc_slow += rq_stats->pp_hd_alloc_slow;
+ s->rx_pp_hd_alloc_empty += rq_stats->pp_hd_alloc_empty;
+ s->rx_pp_hd_alloc_refill += rq_stats->pp_hd_alloc_refill;
+ s->rx_pp_hd_alloc_waive += rq_stats->pp_hd_alloc_waive;
+ s->rx_pp_hd_alloc_slow_high_order += rq_stats->pp_hd_alloc_slow_high_order;
+ s->rx_pp_hd_recycle_cached += rq_stats->pp_hd_recycle_cached;
+ s->rx_pp_hd_recycle_cache_full += rq_stats->pp_hd_recycle_cache_full;
+ s->rx_pp_hd_recycle_ring += rq_stats->pp_hd_recycle_ring;
+ s->rx_pp_hd_recycle_ring_full += rq_stats->pp_hd_recycle_ring_full;
+ s->rx_pp_hd_recycle_released_ref += rq_stats->pp_hd_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -511,6 +535,24 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+
+ pool = c->rq.hd_page_pool;
+ if (!pool || pool == c->rq.page_pool ||
+ !page_pool_get_stats(pool, &stats))
+ return;
+
+ rq_stats->pp_hd_alloc_fast = stats.alloc_stats.fast;
+ rq_stats->pp_hd_alloc_slow = stats.alloc_stats.slow;
+ rq_stats->pp_hd_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
+ rq_stats->pp_hd_alloc_empty = stats.alloc_stats.empty;
+ rq_stats->pp_hd_alloc_waive = stats.alloc_stats.waive;
+ rq_stats->pp_hd_alloc_refill = stats.alloc_stats.refill;
+
+ rq_stats->pp_hd_recycle_cached = stats.recycle_stats.cached;
+ rq_stats->pp_hd_recycle_cache_full = stats.recycle_stats.cache_full;
+ rq_stats->pp_hd_recycle_ring = stats.recycle_stats.ring;
+ rq_stats->pp_hd_recycle_ring_full = stats.recycle_stats.ring_full;
+ rq_stats->pp_hd_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
@@ -2130,6 +2172,18 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
+
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_fast) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_slow) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_slow_high_order) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_refill) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_alloc_waive) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_cached) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_ring) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_ring_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_hd_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index def5dea1463d..113221dfcdfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -226,6 +226,18 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
+
+ u64 rx_pp_hd_alloc_fast;
+ u64 rx_pp_hd_alloc_slow;
+ u64 rx_pp_hd_alloc_slow_high_order;
+ u64 rx_pp_hd_alloc_empty;
+ u64 rx_pp_hd_alloc_refill;
+ u64 rx_pp_hd_alloc_waive;
+ u64 rx_pp_hd_recycle_cached;
+ u64 rx_pp_hd_recycle_cache_full;
+ u64 rx_pp_hd_recycle_ring;
+ u64 rx_pp_hd_recycle_ring_full;
+ u64 rx_pp_hd_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -394,6 +406,18 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
+
+ u64 pp_hd_alloc_fast;
+ u64 pp_hd_alloc_slow;
+ u64 pp_hd_alloc_slow_high_order;
+ u64 pp_hd_alloc_empty;
+ u64 pp_hd_alloc_refill;
+ u64 pp_hd_alloc_waive;
+ u64 pp_hd_recycle_cached;
+ u64 pp_hd_recycle_cache_full;
+ u64 pp_hd_recycle_ring;
+ u64 pp_hd_recycle_ring_full;
+ u64 pp_hd_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
--
2.34.1