* [RFC net-next 0/2] Add page_pool_stats ethtool APIs
From: Lorenzo Bianconi @ 2022-04-07 16:55 UTC
To: netdev
Cc: lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
ilias.apalodimas, jbrouer, andrew, jdamato
Introduce page_pool_stats support to the mvneta driver.
Lorenzo Bianconi (2):
net: page_pool: introduce ethtool stats
net: mvneta: add support for page_pool_get_stats
drivers/net/ethernet/marvell/Kconfig | 1 +
drivers/net/ethernet/marvell/mvneta.c | 59 ++++++++++++++--
include/net/page_pool.h | 24 +++++++
net/core/page_pool.c | 96 +++++++++++++++++++++++++++
4 files changed, 175 insertions(+), 5 deletions(-)
--
2.35.1
* [RFC net-next 1/2] net: page_pool: introduce ethtool stats
From: Lorenzo Bianconi @ 2022-04-07 16:55 UTC
To: netdev
Cc: lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
ilias.apalodimas, jbrouer, andrew, jdamato
Introduce APIs to report page_pool stats through ethtool and to reduce
code duplication across drivers.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
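For context, a driver is expected to wire these helpers into its ethtool
callbacks roughly as follows. This is a minimal sketch: the foo_* names
and FOO_N_STATS are placeholders, not part of this series; patch 2/2 has
the actual mvneta integration.

	static int foo_get_sset_count(struct net_device *dev, int sset)
	{
		if (sset == ETH_SS_STATS)
			return FOO_N_STATS + page_pool_ethtool_stats_get_count();
		return -EOPNOTSUPP;
	}

	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		if (sset != ETH_SS_STATS)
			return;
		/* copy the driver's own stat names first ... */
		data += FOO_N_STATS * ETH_GSTRING_LEN;
		page_pool_ethtool_stats_get_strings(data);
	}

	static void foo_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats, u64 *data)
	{
		struct page_pool_stats pp_stats = {};

		/* fill the driver's own counters first ... */
		data += FOO_N_STATS;
		/* ... accumulate per-queue pool stats into pp_stats, then: */
		page_pool_ethtool_stats_get(data, &pp_stats);
	}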
include/net/page_pool.h | 24 +++++++++++
net/core/page_pool.c | 96 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 120 insertions(+)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index ea5fb70e5101..17fb15b49f1a 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -117,6 +117,30 @@ struct page_pool_stats {
struct page_pool_recycle_stats recycle_stats;
};
+/* List of page_pool stats exported through ethtool. */
+enum {
+ PP_ETHTOOL_ALLOC_FAST,
+ PP_ETHTOOL_ALLOC_SLOW,
+ PP_ETHTOOL_ALLOC_SLOW_HIGH_ORDER,
+ PP_ETHTOOL_ALLOC_EMPTY,
+ PP_ETHTOOL_ALLOC_REFILL,
+ PP_ETHTOOL_ALLOC_WAIVE,
+ PP_ETHTOOL_RECYCLE_CACHED,
+ PP_ETHTOOL_RECYCLE_CACHE_FULL,
+ PP_ETHTOOL_RECYCLE_RING,
+ PP_ETHTOOL_RECYCLE_RING_FULL,
+ PP_ETHTOOL_RECYCLE_RELEASED_REF,
+ PP_ETHTOOL_STATS_MAX,
+};
+
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return PP_ETHTOOL_STATS_MAX;
+}
+
+void page_pool_ethtool_stats_get_strings(u8 *data);
+void page_pool_ethtool_stats_get(u64 *data, struct page_pool_stats *stats);
+
/*
* Drivers that wish to harvest page pool stats and report them to users
* (perhaps via ethtool, debugfs, or another mechanism) can allocate a
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4af55d28ffa3..7f24fc2fe58d 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -18,6 +18,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
#include <linux/poison.h>
+#include <linux/ethtool.h>
#include <trace/events/page_pool.h>
@@ -66,6 +67,101 @@ bool page_pool_get_stats(struct page_pool *pool,
return true;
}
EXPORT_SYMBOL(page_pool_get_stats);
+
+void page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ static const struct {
+ u8 type;
+ char name[ETH_GSTRING_LEN];
+ } stats[PP_ETHTOOL_STATS_MAX] = {
+ {
+ .type = PP_ETHTOOL_ALLOC_FAST,
+ .name = "rx_pp_alloc_fast"
+ }, {
+ .type = PP_ETHTOOL_ALLOC_SLOW,
+ .name = "rx_pp_alloc_slow"
+ }, {
+ .type = PP_ETHTOOL_ALLOC_SLOW_HIGH_ORDER,
+ .name = "rx_pp_alloc_slow_ho"
+ }, {
+ .type = PP_ETHTOOL_ALLOC_EMPTY,
+ .name = "rx_pp_alloc_empty"
+ }, {
+ .type = PP_ETHTOOL_ALLOC_REFILL,
+ .name = "rx_pp_alloc_refill"
+ }, {
+ .type = PP_ETHTOOL_ALLOC_WAIVE,
+ .name = "rx_pp_alloc_waive"
+ }, {
+ .type = PP_ETHTOOL_RECYCLE_CACHED,
+ .name = "rx_pp_recycle_cached"
+ }, {
+ .type = PP_ETHTOOL_RECYCLE_CACHE_FULL,
+ .name = "rx_pp_recycle_cache_full"
+ }, {
+ .type = PP_ETHTOOL_RECYCLE_RING,
+ .name = "rx_pp_recycle_ring"
+ }, {
+ .type = PP_ETHTOOL_RECYCLE_RING_FULL,
+ .name = "rx_pp_recycle_ring_full"
+ }, {
+ .type = PP_ETHTOOL_RECYCLE_RELEASED_REF,
+ .name = "rx_pp_recycle_released_ref"
+ },
+ };
+ int i;
+
+ for (i = 0; i < PP_ETHTOOL_STATS_MAX; i++) {
+ memcpy(data, stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
+
+void page_pool_ethtool_stats_get(u64 *data, struct page_pool_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < PP_ETHTOOL_STATS_MAX; i++) {
+ switch (i) {
+ case PP_ETHTOOL_ALLOC_FAST:
+ *data++ = stats->alloc_stats.fast;
+ break;
+ case PP_ETHTOOL_ALLOC_SLOW:
+ *data++ = stats->alloc_stats.slow;
+ break;
+ case PP_ETHTOOL_ALLOC_SLOW_HIGH_ORDER:
+ *data++ = stats->alloc_stats.slow_high_order;
+ break;
+ case PP_ETHTOOL_ALLOC_EMPTY:
+ *data++ = stats->alloc_stats.empty;
+ break;
+ case PP_ETHTOOL_ALLOC_REFILL:
+ *data++ = stats->alloc_stats.refill;
+ break;
+ case PP_ETHTOOL_ALLOC_WAIVE:
+ *data++ = stats->alloc_stats.waive;
+ break;
+ case PP_ETHTOOL_RECYCLE_CACHED:
+ *data++ = stats->recycle_stats.cached;
+ break;
+ case PP_ETHTOOL_RECYCLE_CACHE_FULL:
+ *data++ = stats->recycle_stats.cache_full;
+ break;
+ case PP_ETHTOOL_RECYCLE_RING:
+ *data++ = stats->recycle_stats.ring;
+ break;
+ case PP_ETHTOOL_RECYCLE_RING_FULL:
+ *data++ = stats->recycle_stats.ring_full;
+ break;
+ case PP_ETHTOOL_RECYCLE_RELEASED_REF:
+ *data++ = stats->recycle_stats.released_refcnt;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get);
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
--
2.35.1
* [RFC net-next 2/2] net: mvneta: add support for page_pool_get_stats
From: Lorenzo Bianconi @ 2022-04-07 16:55 UTC
To: netdev
Cc: lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
ilias.apalodimas, jbrouer, andrew, jdamato
Introduce support for the page_pool_get_stats API in the mvneta driver
and report page_pool stats through ethtool.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
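With this applied, the page_pool counters show up appended to the
existing per-port statistics in ethtool -S output, along these lines
(the counter values below are purely illustrative):

	# ethtool -S eth0 | grep rx_pp
	     rx_pp_alloc_fast: 1284
	     rx_pp_alloc_slow: 11
	     rx_pp_alloc_slow_ho: 0
	     rx_pp_alloc_empty: 11
	     rx_pp_alloc_refill: 35
	     rx_pp_alloc_waive: 0
	     rx_pp_recycle_cached: 3421
	     rx_pp_recycle_cache_full: 0
	     rx_pp_recycle_ring: 87
	     rx_pp_recycle_ring_full: 0
	     rx_pp_recycle_released_ref: 0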
drivers/net/ethernet/marvell/Kconfig | 1 +
drivers/net/ethernet/marvell/mvneta.c | 59 ++++++++++++++++++++++++---
2 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fe0989c0fc25..1240cb2dc07f 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -62,6 +62,7 @@ config MVNETA
select MVMDIO
select PHYLINK
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 934f6dd90992..9864ea09e887 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -540,7 +540,7 @@ struct mvneta_port {
bool eee_active;
bool tx_lpi_enabled;
- u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+ u64 *ethtool_stats;
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -4732,9 +4732,13 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
if (sset == ETH_SS_STATS) {
int i;
- for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- mvneta_statistics[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) {
+ memcpy(data, mvneta_statistics[i].name,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4847,6 +4851,38 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
}
}
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < rxq_number; i++) {
+ struct page_pool *page_pool = pp->rxqs[i].page_pool;
+ struct page_pool_stats pp_stats = {};
+
+ if (!page_pool_get_stats(page_pool, &pp_stats))
+ continue;
+
+ stats.alloc_stats.fast += pp_stats.alloc_stats.fast;
+ stats.alloc_stats.slow += pp_stats.alloc_stats.slow;
+ stats.alloc_stats.slow_high_order +=
+ pp_stats.alloc_stats.slow_high_order;
+ stats.alloc_stats.empty += pp_stats.alloc_stats.empty;
+ stats.alloc_stats.refill += pp_stats.alloc_stats.refill;
+ stats.alloc_stats.waive += pp_stats.alloc_stats.waive;
+ stats.recycle_stats.cached += pp_stats.recycle_stats.cached;
+ stats.recycle_stats.cache_full +=
+ pp_stats.recycle_stats.cache_full;
+ stats.recycle_stats.ring += pp_stats.recycle_stats.ring;
+ stats.recycle_stats.ring_full +=
+ pp_stats.recycle_stats.ring_full;
+ stats.recycle_stats.released_refcnt +=
+ pp_stats.recycle_stats.released_refcnt;
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mvneta_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -4857,12 +4893,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
+
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics);
+ return ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+
return -EOPNOTSUPP;
}
@@ -5372,6 +5412,7 @@ static int mvneta_probe(struct platform_device *pdev)
phy_interface_t phy_mode;
const char *mac_from;
int tx_csum_limit;
+ int stats_len;
int err;
int cpu;
@@ -5392,6 +5433,14 @@ static int mvneta_probe(struct platform_device *pdev)
pp->rxq_def = rxq_def;
pp->indir[0] = rxq_def;
+ stats_len = ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+ pp->ethtool_stats = devm_kzalloc(&pdev->dev,
+ sizeof(*pp->ethtool_stats) * stats_len,
+ GFP_KERNEL);
+ if (!pp->ethtool_stats)
+ return -ENOMEM;
+
err = of_get_phy_mode(dn, &phy_mode);
if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
--
2.35.1
* Re: [RFC net-next 2/2] net: mvneta: add support for page_pool_get_stats
From: Andrew Lunn @ 2022-04-07 18:25 UTC
To: Lorenzo Bianconi
Cc: netdev, lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
ilias.apalodimas, jbrouer, jdamato
> +static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
> +{
> + struct page_pool_stats stats = {};
> + int i;
> +
> + for (i = 0; i < rxq_number; i++) {
> + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> + struct page_pool_stats pp_stats = {};
> +
> + if (!page_pool_get_stats(page_pool, &pp_stats))
> + continue;
> +
> + stats.alloc_stats.fast += pp_stats.alloc_stats.fast;
> + stats.alloc_stats.slow += pp_stats.alloc_stats.slow;
> + stats.alloc_stats.slow_high_order +=
> + pp_stats.alloc_stats.slow_high_order;
> + stats.alloc_stats.empty += pp_stats.alloc_stats.empty;
> + stats.alloc_stats.refill += pp_stats.alloc_stats.refill;
> + stats.alloc_stats.waive += pp_stats.alloc_stats.waive;
> + stats.recycle_stats.cached += pp_stats.recycle_stats.cached;
> + stats.recycle_stats.cache_full +=
> + pp_stats.recycle_stats.cache_full;
> + stats.recycle_stats.ring += pp_stats.recycle_stats.ring;
> + stats.recycle_stats.ring_full +=
> + pp_stats.recycle_stats.ring_full;
> + stats.recycle_stats.released_refcnt +=
> + pp_stats.recycle_stats.released_refcnt;
We should be trying to remove this sort of code from the driver, and
put it all in the core. It wants to be something more like:
struct page_pool_stats stats = {};
int i;
for (i = 0; i < rxq_number; i++) {
struct page_pool *page_pool = pp->rxqs[i].page_pool;
if (!page_pool_get_stats(page_pool, &stats))
continue;
page_pool_ethtool_stats_get(data, &stats);
Let page_pool_get_stats() do the accumulate as it puts values in stats.
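For illustration, assuming the pool-internal counters mirror struct
page_pool_stats, the copy inside page_pool_get_stats() would just become
an accumulate, roughly:

	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	/* ... and likewise for the remaining alloc and recycle counters */

so the same stats struct can be passed in for every pool in the loop
above.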
You probably should also rework the mellanox driver to use the same
code structure.
Andrew
* Re: [RFC net-next 2/2] net: mvneta: add support for page_pool_get_stats
From: Ilias Apalodimas @ 2022-04-07 18:35 UTC
To: Andrew Lunn
Cc: Lorenzo Bianconi, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, jbrouer, jdamato
Hi Andrew,
On Thu, 7 Apr 2022 at 21:25, Andrew Lunn <andrew@lunn.ch> wrote:
>
> > +static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
> > +{
> > + struct page_pool_stats stats = {};
> > + int i;
> > +
> > + for (i = 0; i < rxq_number; i++) {
> > + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > + struct page_pool_stats pp_stats = {};
> > +
> > + if (!page_pool_get_stats(page_pool, &pp_stats))
> > + continue;
> > +
> > + stats.alloc_stats.fast += pp_stats.alloc_stats.fast;
> > + stats.alloc_stats.slow += pp_stats.alloc_stats.slow;
> > + stats.alloc_stats.slow_high_order +=
> > + pp_stats.alloc_stats.slow_high_order;
> > + stats.alloc_stats.empty += pp_stats.alloc_stats.empty;
> > + stats.alloc_stats.refill += pp_stats.alloc_stats.refill;
> > + stats.alloc_stats.waive += pp_stats.alloc_stats.waive;
> > + stats.recycle_stats.cached += pp_stats.recycle_stats.cached;
> > + stats.recycle_stats.cache_full +=
> > + pp_stats.recycle_stats.cache_full;
> > + stats.recycle_stats.ring += pp_stats.recycle_stats.ring;
> > + stats.recycle_stats.ring_full +=
> > + pp_stats.recycle_stats.ring_full;
> > + stats.recycle_stats.released_refcnt +=
> > + pp_stats.recycle_stats.released_refcnt;
>
> We should be trying to remove this sort of code from the driver, and
> put it all in the core. It wants to be something more like:
>
> struct page_pool_stats stats = {};
> int i;
>
> for (i = 0; i < rxq_number; i++) {
> struct page_pool *page_pool = pp->rxqs[i].page_pool;
>
> if (!page_pool_get_stats(page_pool, &stats))
> continue;
>
> page_pool_ethtool_stats_get(data, &stats);
>
> Let page_pool_get_stats() do the accumulate as it puts values in stats.
Unless I misunderstand this, I don't think that's doable in page pool.
That would mean the page pool is aware of which stats to accumulate per
driver, and I certainly don't want anything driver-specific to creep in
there. The driver knows the number of pools it is using and can gather
them all together.
Regards
/Ilias
>
> You probably should also rework the mellanox driver to use the same
> code structure.
>
> Andrew
>
>
* Re: [RFC net-next 2/2] net: mvneta: add support for page_pool_get_stats
From: Andrew Lunn @ 2022-04-07 19:25 UTC
To: Ilias Apalodimas
Cc: Lorenzo Bianconi, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, jbrouer, jdamato
On Thu, Apr 07, 2022 at 09:35:52PM +0300, Ilias Apalodimas wrote:
> Hi Andrew,
>
> On Thu, 7 Apr 2022 at 21:25, Andrew Lunn <andrew@lunn.ch> wrote:
> >
> > > +static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
> > > +{
> > > + struct page_pool_stats stats = {};
> > > + int i;
> > > +
> > > + for (i = 0; i < rxq_number; i++) {
> > > + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > > + struct page_pool_stats pp_stats = {};
> > > +
> > > + if (!page_pool_get_stats(page_pool, &pp_stats))
> > > + continue;
> > > +
> > > + stats.alloc_stats.fast += pp_stats.alloc_stats.fast;
> > > + stats.alloc_stats.slow += pp_stats.alloc_stats.slow;
> > > + stats.alloc_stats.slow_high_order +=
> > > + pp_stats.alloc_stats.slow_high_order;
> > > + stats.alloc_stats.empty += pp_stats.alloc_stats.empty;
> > > + stats.alloc_stats.refill += pp_stats.alloc_stats.refill;
> > > + stats.alloc_stats.waive += pp_stats.alloc_stats.waive;
> > > + stats.recycle_stats.cached += pp_stats.recycle_stats.cached;
> > > + stats.recycle_stats.cache_full +=
> > > + pp_stats.recycle_stats.cache_full;
> > > + stats.recycle_stats.ring += pp_stats.recycle_stats.ring;
> > > + stats.recycle_stats.ring_full +=
> > > + pp_stats.recycle_stats.ring_full;
> > > + stats.recycle_stats.released_refcnt +=
> > > + pp_stats.recycle_stats.released_refcnt;
> >
> > We should be trying to remove this sort of code from the driver, and
> > put it all in the core. It wants to be something more like:
> >
> > struct page_pool_stats stats = {};
> > int i;
> >
> > for (i = 0; i < rxq_number; i++) {
> > struct page_pool *page_pool = pp->rxqs[i].page_pool;
> >
> > if (!page_pool_get_stats(page_pool, &stats))
> > continue;
> >
> > page_pool_ethtool_stats_get(data, &stats);
> >
> > Let page_pool_get_stats() do the accumulate as it puts values in stats.
>
> Unless I misunderstand this, I don't think that's doable in page pool.
> That means page pool is aware of what stats to accumulate per driver
> and I certainly don't want anything driver specific to creep in there.
> The driver knows the number of pools he is using and he can gather
> them all together.
I agree that the driver knows about the number of pools. For mvneta,
there is one per RX queue, which is this part of my suggestion:
> > for (i = 0; i < rxq_number; i++) {
> > struct page_pool *page_pool = pp->rxqs[i].page_pool;
> >
However, it has no idea about the stats themselves. They are purely a
construct of the page pool. Hence the next part of my suggestion: ask the
page pool for the stats and place them into stats, doing the accumulate
at the same time:
> > if (!page_pool_get_stats(page_pool, &stats))
> > continue;
and now that we have the accumulated stats, turn them into ethtool format:
> > page_pool_ethtool_stats_get(data, &stats);
Where do you see any driver knowledge required in either
page_pool_get_stats() or page_pool_ethtool_stats_get()?
Andrew
* Re: [RFC net-next 2/2] net: mvneta: add support for page_pool_get_stats
From: Ilias Apalodimas @ 2022-04-07 20:19 UTC
To: Andrew Lunn
Cc: Lorenzo Bianconi, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, jbrouer, jdamato
Hi Andrew,
[...]
> > > > +
> > > > + stats.alloc_stats.fast += pp_stats.alloc_stats.fast;
> > > > + stats.alloc_stats.slow += pp_stats.alloc_stats.slow;
> > > > + stats.alloc_stats.slow_high_order +=
> > > > + pp_stats.alloc_stats.slow_high_order;
> > > > + stats.alloc_stats.empty += pp_stats.alloc_stats.empty;
> > > > + stats.alloc_stats.refill += pp_stats.alloc_stats.refill;
> > > > + stats.alloc_stats.waive += pp_stats.alloc_stats.waive;
> > > > + stats.recycle_stats.cached += pp_stats.recycle_stats.cached;
> > > > + stats.recycle_stats.cache_full +=
> > > > + pp_stats.recycle_stats.cache_full;
> > > > + stats.recycle_stats.ring += pp_stats.recycle_stats.ring;
> > > > + stats.recycle_stats.ring_full +=
> > > > + pp_stats.recycle_stats.ring_full;
> > > > + stats.recycle_stats.released_refcnt +=
> > > > + pp_stats.recycle_stats.released_refcnt;
> > >
> > > We should be trying to remove this sort of code from the driver, and
> > > put it all in the core. It wants to be something more like:
> > >
> > > struct page_pool_stats stats = {};
> > > int i;
> > >
> > > for (i = 0; i < rxq_number; i++) {
> > > struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > >
> > > if (!page_pool_get_stats(page_pool, &stats))
> > > continue;
> > >
> > > page_pool_ethtool_stats_get(data, &stats);
> > >
> > > Let page_pool_get_stats() do the accumulate as it puts values in stats.
> >
> > Unless I misunderstand this, I don't think that's doable in page pool.
> > That means page pool is aware of what stats to accumulate per driver
> > and I certainly don't want anything driver specific to creep in there.
> > The driver knows the number of pools he is using and he can gather
> > them all together.
>
> I agree that the driver knows about the number of pools. For mvneta,
> there is one per RX queue. Which is this part of my suggestion
>
> > > for (i = 0; i < rxq_number; i++) {
> > > struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > >
>
> However, it has no idea about the stats themselves. They are purely a
> construct of the page pool. Hence the next part of my suggest, ask the
> page pool for the stats, place them into stats, doing the accumulate
> at the same time.:
>
> > > if (!page_pool_get_stats(page_pool, &stats))
> > > continue;
>
> and now we have the accumulated stats, turn them into ethtool format:
>
> > > page_pool_ethtool_stats_get(data, &stats);
>
> Where do you see any driver knowledge required in either of
> page_pool_get_stats() or page_pool_ethtool_stats_get().
Indeed I read the first mail wrong. I thought you wanted page_pool
itself to account for the driver stats without passing a 'struct
page_pool *pool' to page_pool_get_stats(). In a system with XDP (which
also uses page_pool) or multiple drivers using it, that would require
some metadata fed into the page pool subsystem to reason about which
pools to accumulate.
The code snippet you included seems fine.
Thanks
/Ilias
>
> Andrew
* Re: [RFC net-next 1/2] net: page_pool: introduce ethtool stats
From: Jakub Kicinski @ 2022-04-08 3:30 UTC
To: Lorenzo Bianconi
Cc: netdev, lorenzo.bianconi, davem, pabeni, thomas.petazzoni,
ilias.apalodimas, jbrouer, andrew, jdamato
On Thu, 7 Apr 2022 18:55:04 +0200 Lorenzo Bianconi wrote:
> +void page_pool_ethtool_stats_get_strings(u8 *data)
This needs to return a pointer to just past the used area, so drivers can
append more stats. Or make data a double pointer and update it before
returning.
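For illustration, the first variant could look roughly like this, with
the stats[] table kept as in the patch:

	u8 *page_pool_ethtool_stats_get_strings(u8 *data)
	{
		int i;

		for (i = 0; i < PP_ETHTOOL_STATS_MAX; i++) {
			memcpy(data, stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}

		return data;
	}

so a driver can chain: data = page_pool_ethtool_stats_get_strings(data);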
> +{
> + static const struct {
> + u8 type;
> + char name[ETH_GSTRING_LEN];
> + } stats[PP_ETHTOOL_STATS_MAX] = {
> + {
> + .type = PP_ETHTOOL_ALLOC_FAST,
Why enumerate the type? It's not used.
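For illustration, a plain string array indexed in enum order would be
enough, e.g.:

	static const char pp_stats[PP_ETHTOOL_STATS_MAX][ETH_GSTRING_LEN] = {
		"rx_pp_alloc_fast",
		"rx_pp_alloc_slow",
		/* ... one entry per PP_ETHTOOL_* value, in enum order */
	};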
> + .name = "rx_pp_alloc_fast"
> + }, {
> + .type = PP_ETHTOOL_ALLOC_SLOW,
> + .name = "rx_pp_alloc_slow"
> + }, {
> + .type = PP_ETHTOOL_ALLOC_SLOW_HIGH_ORDER,
> + .name = "rx_pp_alloc_slow_ho"
> + }, {
> + .type = PP_ETHTOOL_ALLOC_EMPTY,
> + .name = "rx_pp_alloc_empty"
> + }, {
> + .type = PP_ETHTOOL_ALLOC_REFILL,
> + .name = "rx_pp_alloc_refill"
> + }, {
> + .type = PP_ETHTOOL_ALLOC_WAIVE,
> + .name = "rx_pp_alloc_waive"
> + }, {
> + .type = PP_ETHTOOL_RECYCLE_CACHED,
> + .name = "rx_pp_recycle_cached"
> + }, {
> + .type = PP_ETHTOOL_RECYCLE_CACHE_FULL,
> + .name = "rx_pp_recycle_cache_full"
> + }, {
> + .type = PP_ETHTOOL_RECYCLE_RING,
> + .name = "rx_pp_recycle_ring"
> + }, {
> + .type = PP_ETHTOOL_RECYCLE_RING_FULL,
> + .name = "rx_pp_recycle_ring_full"
> + }, {
> + .type = PP_ETHTOOL_RECYCLE_RELEASED_REF,
> + .name = "rx_pp_recycle_released_ref"
> + },
> + };
> + int i;
> +
> + for (i = 0; i < PP_ETHTOOL_STATS_MAX; i++) {
> + memcpy(data, stats[i].name, ETH_GSTRING_LEN);
> + data += ETH_GSTRING_LEN;
> + }
* Re: [RFC net-next 1/2] net: page_pool: introduce ethtool stats
From: Lorenzo Bianconi @ 2022-04-08 8:11 UTC
To: Jakub Kicinski
Cc: netdev, lorenzo.bianconi, davem, pabeni, thomas.petazzoni,
ilias.apalodimas, jbrouer, andrew, jdamato
On Apr 07, Jakub Kicinski wrote:
> On Thu, 7 Apr 2022 18:55:04 +0200 Lorenzo Bianconi wrote:
> > +void page_pool_ethtool_stats_get_strings(u8 *data)
>
> This needs to return the pointer to after the used area, so drivers can
> append more stats. Or make data double pointer and update it before
> returning.
ack, I will fix it in v2.
>
> > +{
> > + static const struct {
> > + u8 type;
> > + char name[ETH_GSTRING_LEN];
> > + } stats[PP_ETHTOOL_STATS_MAX] = {
> > + {
> > + .type = PP_ETHTOOL_ALLOC_FAST,
>
> Why enumerate the type? It's not used.
ack, I will fix it in v2.
Regards,
Lorenzo
>
> > + .name = "rx_pp_alloc_fast"
> > + }, {
> > + .type = PP_ETHTOOL_ALLOC_SLOW,
> > + .name = "rx_pp_alloc_slow"
> > + }, {
> > + .type = PP_ETHTOOL_ALLOC_SLOW_HIGH_ORDER,
> > + .name = "rx_pp_alloc_slow_ho"
> > + }, {
> > + .type = PP_ETHTOOL_ALLOC_EMPTY,
> > + .name = "rx_pp_alloc_empty"
> > + }, {
> > + .type = PP_ETHTOOL_ALLOC_REFILL,
> > + .name = "rx_pp_alloc_refill"
> > + }, {
> > + .type = PP_ETHTOOL_ALLOC_WAIVE,
> > + .name = "rx_pp_alloc_waive"
> > + }, {
> > + .type = PP_ETHTOOL_RECYCLE_CACHED,
> > + .name = "rx_pp_recycle_cached"
> > + }, {
> > + .type = PP_ETHTOOL_RECYCLE_CACHE_FULL,
> > + .name = "rx_pp_recycle_cache_full"
> > + }, {
> > + .type = PP_ETHTOOL_RECYCLE_RING,
> > + .name = "rx_pp_recycle_ring"
> > + }, {
> > + .type = PP_ETHTOOL_RECYCLE_RING_FULL,
> > + .name = "rx_pp_recycle_ring_full"
> > + }, {
> > + .type = PP_ETHTOOL_RECYCLE_RELEASED_REF,
> > + .name = "rx_pp_recycle_released_ref"
> > + },
> > + };
> > + int i;
> > +
> > + for (i = 0; i < PP_ETHTOOL_STATS_MAX; i++) {
> > + memcpy(data, stats[i].name, ETH_GSTRING_LEN);
> > + data += ETH_GSTRING_LEN;
> > + }