* [PATCH net-next] net: mvneta: add support for page_pool_get_stats
@ 2022-04-05 20:32 Lorenzo Bianconi
2022-04-06 12:49 ` Andrew Lunn
2022-04-06 13:38 ` Andrew Lunn
0 siblings, 2 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-04-05 20:32 UTC (permalink / raw)
To: netdev
Cc: lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni, linux,
jbrouer, ilias.apalodimas, jdamato
Introduce support for the page_pool_get_stats API to mvneta driver.
If CONFIG_PAGE_POOL_STATS is enabled, ethtool will report page pool
stats.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
drivers/net/ethernet/marvell/mvneta.c | 105 ++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 934f6dd90992..b986a6bded9a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,6 +382,19 @@ enum {
ETHTOOL_XDP_TX_ERR,
ETHTOOL_XDP_XMIT,
ETHTOOL_XDP_XMIT_ERR,
+#ifdef CONFIG_PAGE_POOL_STATS
+ ETHTOOL_PP_ALLOC_FAST,
+ ETHTOOL_PP_ALLOC_SLOW,
+ ETHTOOL_PP_ALLOC_SLOW_HIGH_ORDER,
+ ETHTOOL_PP_ALLOC_EMPTY,
+ ETHTOOL_PP_ALLOC_REFILL,
+ ETHTOOL_PP_ALLOC_WAIVE,
+ ETHTOOL_PP_RECYCLE_CACHED,
+ ETHTOOL_PP_RECYCLE_CACHE_FULL,
+ ETHTOOL_PP_RECYCLE_RING,
+ ETHTOOL_PP_RECYCLE_RING_FULL,
+ ETHTOOL_PP_RECYCLE_RELEASED_REF,
+#endif /* CONFIG_PAGE_POOL_STATS */
ETHTOOL_MAX_STATS,
};
@@ -443,6 +456,19 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
+#ifdef CONFIG_PAGE_POOL_STATS
+ { ETHTOOL_PP_ALLOC_FAST, T_SW, "rx_pp_alloc_fast", },
+ { ETHTOOL_PP_ALLOC_SLOW, T_SW, "rx_pp_alloc_slow", },
+ { ETHTOOL_PP_ALLOC_SLOW_HIGH_ORDER, T_SW, "rx_pp_alloc_slow_ho", },
+ { ETHTOOL_PP_ALLOC_EMPTY, T_SW, "rx_pp_alloc_empty", },
+ { ETHTOOL_PP_ALLOC_REFILL, T_SW, "rx_pp_alloc_refill", },
+ { ETHTOOL_PP_ALLOC_WAIVE, T_SW, "rx_pp_alloc_waive", },
+ { ETHTOOL_PP_RECYCLE_CACHED, T_SW, "rx_pp_recycle_cached", },
+ { ETHTOOL_PP_RECYCLE_CACHE_FULL, T_SW, "rx_pp_recycle_cache_full", },
+ { ETHTOOL_PP_RECYCLE_RING, T_SW, "rx_pp_recycle_ring", },
+ { ETHTOOL_PP_RECYCLE_RING_FULL, T_SW, "rx_pp_recycle_ring_full", },
+ { ETHTOOL_PP_RECYCLE_RELEASED_REF, T_SW, "rx_pp_recycle_released_ref", },
+#endif /* CONFIG_PAGE_POOL_STATS */
};
struct mvneta_stats {
@@ -4783,16 +4809,56 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
}
}
+#ifdef CONFIG_PAGE_POOL_STATS
+static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
+ struct page_pool_stats *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rxq_number; i++) {
+ struct page_pool *page_pool = pp->rxqs[i].page_pool;
+ struct page_pool_stats pp_stats = {};
+
+ if (!page_pool_get_stats(page_pool, &pp_stats))
+ continue;
+
+ stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
+ stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
+ stats->alloc_stats.slow_high_order +=
+ pp_stats.alloc_stats.slow_high_order;
+ stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
+ stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
+ stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
+ stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
+ stats->recycle_stats.cache_full +=
+ pp_stats.recycle_stats.cache_full;
+ stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
+ stats->recycle_stats.ring_full +=
+ pp_stats.recycle_stats.ring_full;
+ stats->recycle_stats.released_refcnt +=
+ pp_stats.recycle_stats.released_refcnt;
+ }
+}
+#endif /* CONFIG_PAGE_POOL_STATS */
+
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
struct mvneta_ethtool_stats stats = {};
const struct mvneta_statistic *s;
+#ifdef CONFIG_PAGE_POOL_STATS
+ struct page_pool_stats pp_stats;
+#endif /* CONFIG_PAGE_POOL_STATS */
void __iomem *base = pp->base;
u32 high, low;
u64 val;
int i;
mvneta_ethtool_update_pcpu_stats(pp, &stats);
+#ifdef CONFIG_PAGE_POOL_STATS
+ mvneta_ethtool_update_pp_stats(pp, &pp_stats);
+#endif /* CONFIG_PAGE_POOL_STATS */
+
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
@@ -4841,6 +4907,45 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
case ETHTOOL_XDP_XMIT_ERR:
pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
break;
+#ifdef CONFIG_PAGE_POOL_STATS
+ case ETHTOOL_PP_ALLOC_FAST:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.fast;
+ break;
+ case ETHTOOL_PP_ALLOC_SLOW:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.slow;
+ break;
+ case ETHTOOL_PP_ALLOC_SLOW_HIGH_ORDER:
+ pp->ethtool_stats[i] =
+ pp_stats.alloc_stats.slow_high_order;
+ break;
+ case ETHTOOL_PP_ALLOC_EMPTY:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.empty;
+ break;
+ case ETHTOOL_PP_ALLOC_REFILL:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.refill;
+ break;
+ case ETHTOOL_PP_ALLOC_WAIVE:
+ pp->ethtool_stats[i] = pp_stats.alloc_stats.waive;
+ break;
+ case ETHTOOL_PP_RECYCLE_CACHED:
+ pp->ethtool_stats[i] = pp_stats.recycle_stats.cached;
+ break;
+ case ETHTOOL_PP_RECYCLE_CACHE_FULL:
+ pp->ethtool_stats[i] =
+ pp_stats.recycle_stats.cache_full;
+ break;
+ case ETHTOOL_PP_RECYCLE_RING:
+ pp->ethtool_stats[i] = pp_stats.recycle_stats.ring;
+ break;
+ case ETHTOOL_PP_RECYCLE_RING_FULL:
+ pp->ethtool_stats[i] =
+ pp_stats.recycle_stats.ring_full;
+ break;
+ case ETHTOOL_PP_RECYCLE_RELEASED_REF:
+ pp->ethtool_stats[i] =
+ pp_stats.recycle_stats.released_refcnt;
+ break;
+#endif /* CONFIG_PAGE_POOL_STATS */
}
break;
}
--
2.35.1
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-05 20:32 [PATCH net-next] net: mvneta: add support for page_pool_get_stats Lorenzo Bianconi
@ 2022-04-06 12:49 ` Andrew Lunn
2022-04-06 12:53 ` Ilias Apalodimas
2022-04-06 13:38 ` Andrew Lunn
1 sibling, 1 reply; 9+ messages in thread
From: Andrew Lunn @ 2022-04-06 12:49 UTC (permalink / raw)
To: Lorenzo Bianconi
Cc: netdev, lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
linux, jbrouer, ilias.apalodimas, jdamato
On Tue, Apr 05, 2022 at 10:32:12PM +0200, Lorenzo Bianconi wrote:
> Introduce support for the page_pool_get_stats API to mvneta driver.
> If CONFIG_PAGE_POOL_STATS is enabled, ethtool will report page pool
> stats.
Hi Lorenzo
There are a lot of #ifdef in this patch. They are generally not
liked. What does the patch actually depend on? mvneta has a select
PAGE_POOL so the page pool itself should always be available?
Andrew
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-06 12:49 ` Andrew Lunn
@ 2022-04-06 12:53 ` Ilias Apalodimas
2022-04-06 13:05 ` Lorenzo Bianconi
0 siblings, 1 reply; 9+ messages in thread
From: Ilias Apalodimas @ 2022-04-06 12:53 UTC (permalink / raw)
To: Andrew Lunn
Cc: Lorenzo Bianconi, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, linux, jbrouer, jdamato
Hi Andrew
On Wed, 6 Apr 2022 at 15:49, Andrew Lunn <andrew@lunn.ch> wrote:
>
> On Tue, Apr 05, 2022 at 10:32:12PM +0200, Lorenzo Bianconi wrote:
> > Introduce support for the page_pool_get_stats API to mvneta driver.
> > If CONFIG_PAGE_POOL_STATS is enabled, ethtool will report page pool
> > stats.
>
> Hi Lorenzo
>
> There are a lot of #ifdef in this patch. They are generally not
> liked. What does the patch actually depend on? mvneta has a select
> PAGE_POOL so the page pool itself should always be available?
The stats are on a different Kconfig since we wanted to allow people
opt out if they were worried about performance. However this is not a
10gbit interface and I doubt there will be any measurable hit. I
think selecting PAGE_POOL_STATS for the driver makes sense and will
make the code easier to follow.
Thanks
/Ilias
>
> Andrew
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-06 12:53 ` Ilias Apalodimas
@ 2022-04-06 13:05 ` Lorenzo Bianconi
0 siblings, 0 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-04-06 13:05 UTC (permalink / raw)
To: Ilias Apalodimas
Cc: Andrew Lunn, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, linux, jbrouer, jdamato
[-- Attachment #1: Type: text/plain, Size: 1116 bytes --]
> Hi Andrew
>
> On Wed, 6 Apr 2022 at 15:49, Andrew Lunn <andrew@lunn.ch> wrote:
> >
> > On Tue, Apr 05, 2022 at 10:32:12PM +0200, Lorenzo Bianconi wrote:
> > > Introduce support for the page_pool_get_stats API to mvneta driver.
> > > If CONFIG_PAGE_POOL_STATS is enabled, ethtool will report page pool
> > > stats.
> >
> > Hi Lorenzo
> >
> > There are a lot of #ifdef in this patch. They are generally not
> > liked. What does the patch actually depend on? mvneta has a select
> > PAGE_POOL so the page pool itself should always be available?
>
> The stats are on a different Kconfig since we wanted to allow people
> opt out if they were worried about performance. However this is not a
> 10gbit interface and I doubt there will be any measurable hit. I
> think selecting PAGE_POOL_STATS for the driver makes sense and will
> make the code easier to follow.
Hi Andrew and Ilias,
I just kept the same approach used for mlx driver but I am fine to always
select PAGE_POOL_STATS for mvneta. I will fix it in v2.
Regards,
Lorenzo
>
> Thanks
> /Ilias
> >
> > Andrew
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 228 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-05 20:32 [PATCH net-next] net: mvneta: add support for page_pool_get_stats Lorenzo Bianconi
2022-04-06 12:49 ` Andrew Lunn
@ 2022-04-06 13:38 ` Andrew Lunn
2022-04-06 14:02 ` Lorenzo Bianconi
1 sibling, 1 reply; 9+ messages in thread
From: Andrew Lunn @ 2022-04-06 13:38 UTC (permalink / raw)
To: Lorenzo Bianconi
Cc: netdev, lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
linux, jbrouer, ilias.apalodimas, jdamato
> +static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
> + struct page_pool_stats *stats)
> +{
> + int i;
> +
> + memset(stats, 0, sizeof(*stats));
> + for (i = 0; i < rxq_number; i++) {
> + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> + struct page_pool_stats pp_stats = {};
> +
> + if (!page_pool_get_stats(page_pool, &pp_stats))
> + continue;
> +
> + stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
> + stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
> + stats->alloc_stats.slow_high_order +=
> + pp_stats.alloc_stats.slow_high_order;
> + stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
> + stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
> + stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
> + stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
> + stats->recycle_stats.cache_full +=
> + pp_stats.recycle_stats.cache_full;
> + stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
> + stats->recycle_stats.ring_full +=
> + pp_stats.recycle_stats.ring_full;
> + stats->recycle_stats.released_refcnt +=
> + pp_stats.recycle_stats.released_refcnt;
Am i right in saying, these are all software stats? They are also
generic for any receive queue using the page pool?
It seems odd the driver is doing the addition here. Why not pass stats
into page_pool_get_stats()? That will make it easier when you add
additional statistics?
I'm also wondering if ethtool -S is even the correct API. It should be
for hardware dependent statistics, those which change between
implementations. Where as these statistics should be generic. Maybe
they should be in /sys/class/net/ethX/statistics/ and the driver
itself is not even involved, the page pool code implements it?
Andrew
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-06 13:38 ` Andrew Lunn
@ 2022-04-06 14:02 ` Lorenzo Bianconi
2022-04-06 23:01 ` Joe Damato
0 siblings, 1 reply; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-04-06 14:02 UTC (permalink / raw)
To: Andrew Lunn
Cc: netdev, lorenzo.bianconi, davem, kuba, pabeni, thomas.petazzoni,
linux, jbrouer, ilias.apalodimas, jdamato
[-- Attachment #1: Type: text/plain, Size: 2337 bytes --]
> > +static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
> > + struct page_pool_stats *stats)
> > +{
> > + int i;
> > +
> > + memset(stats, 0, sizeof(*stats));
> > + for (i = 0; i < rxq_number; i++) {
> > + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > + struct page_pool_stats pp_stats = {};
> > +
> > + if (!page_pool_get_stats(page_pool, &pp_stats))
> > + continue;
> > +
> > + stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
> > + stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
> > + stats->alloc_stats.slow_high_order +=
> > + pp_stats.alloc_stats.slow_high_order;
> > + stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
> > + stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
> > + stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
> > + stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
> > + stats->recycle_stats.cache_full +=
> > + pp_stats.recycle_stats.cache_full;
> > + stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
> > + stats->recycle_stats.ring_full +=
> > + pp_stats.recycle_stats.ring_full;
> > + stats->recycle_stats.released_refcnt +=
> > + pp_stats.recycle_stats.released_refcnt;
>
> Am i right in saying, these are all software stats? They are also
> generic for any receive queue using the page pool?
yes, these stats are accounted by the kernel so they are sw stats, but I guess
xdp ones are sw as well, right?
>
> It seems odd the driver is doing the addition here. Why not pass stats
> into page_pool_get_stats()? That will make it easier when you add
> additional statistics?
>
> I'm also wondering if ethtool -S is even the correct API. It should be
> for hardware dependent statistics, those which change between
> implementations. Where as these statistics should be generic. Maybe
> they should be in /sys/class/net/ethX/statistics/ and the driver
> itself is not even involved, the page pool code implements it?
I do not have a strong opinion on it, but I can see an issue for some drivers
(e.g. mvpp2 iirc) where page_pools are not specific for each net_device but are shared
between multiple ports, so maybe it is better to allow the driver to decide how
to report them. What do you think?
Regards,
Lorenzo
>
> Andrew
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 228 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-06 14:02 ` Lorenzo Bianconi
@ 2022-04-06 23:01 ` Joe Damato
2022-04-06 23:36 ` Andrew Lunn
0 siblings, 1 reply; 9+ messages in thread
From: Joe Damato @ 2022-04-06 23:01 UTC (permalink / raw)
To: Lorenzo Bianconi
Cc: Andrew Lunn, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, linux, jbrouer, ilias.apalodimas
On Wed, Apr 06, 2022 at 04:02:44PM +0200, Lorenzo Bianconi wrote:
> > > +static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
> > > + struct page_pool_stats *stats)
> > > +{
> > > + int i;
> > > +
> > > + memset(stats, 0, sizeof(*stats));
> > > + for (i = 0; i < rxq_number; i++) {
> > > + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > > + struct page_pool_stats pp_stats = {};
> > > +
> > > + if (!page_pool_get_stats(page_pool, &pp_stats))
> > > + continue;
> > > +
> > > + stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
> > > + stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
> > > + stats->alloc_stats.slow_high_order +=
> > > + pp_stats.alloc_stats.slow_high_order;
> > > + stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
> > > + stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
> > > + stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
> > > + stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
> > > + stats->recycle_stats.cache_full +=
> > > + pp_stats.recycle_stats.cache_full;
> > > + stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
> > > + stats->recycle_stats.ring_full +=
> > > + pp_stats.recycle_stats.ring_full;
> > > + stats->recycle_stats.released_refcnt +=
> > > + pp_stats.recycle_stats.released_refcnt;
> >
> > Am i right in saying, these are all software stats? They are also
> > generic for any receive queue using the page pool?
>
> yes, these stats are accounted by the kernel so they are sw stats, but I guess
> xdp ones are sw as well, right?
>
> >
> > It seems odd the driver is doing the addition here. Why not pass stats
> > into page_pool_get_stats()? That will make it easier when you add
> > additional statistics?
> >
> > I'm also wondering if ethtool -S is even the correct API. It should be
> > for hardware dependent statistics, those which change between
> > implementations. Where as these statistics should be generic. Maybe
> > they should be in /sys/class/net/ethX/statistics/ and the driver
> > itself is not even involved, the page pool code implements it?
>
> I do not have a strong opinion on it, but I can see an issue for some drivers
> (e.g. mvpp2 iirc) where page_pools are not specific for each net_device but are shared
> between multiple ports, so maybe it is better to allow the driver to decide how
> to report them. What do you think?
When I did the implementation of this API the feedback was essentially
that the drivers should be responsible for reporting the stats of their
active page_pool structures; this is why the first driver to use this
(mlx5) uses the API and outputs the stats via ethtool -S.
I have no strong preference, either, but I think that exposing the stats
via an API for the drivers to consume is less tricky; the driver knows
which page_pools are active and which pool is associated with which
RX-queue, and so on.
If there is general consensus for a different approach amongst the
page_pool maintainers, I am happy to implement it.
Thanks,
Joe
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-06 23:01 ` Joe Damato
@ 2022-04-06 23:36 ` Andrew Lunn
2022-04-07 16:48 ` Lorenzo Bianconi
0 siblings, 1 reply; 9+ messages in thread
From: Andrew Lunn @ 2022-04-06 23:36 UTC (permalink / raw)
To: Joe Damato
Cc: Lorenzo Bianconi, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, linux, jbrouer, ilias.apalodimas
On Wed, Apr 06, 2022 at 04:01:37PM -0700, Joe Damato wrote:
> On Wed, Apr 06, 2022 at 04:02:44PM +0200, Lorenzo Bianconi wrote:
> > > > +static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
> > > > + struct page_pool_stats *stats)
> > > > +{
> > > > + int i;
> > > > +
> > > > + memset(stats, 0, sizeof(*stats));
> > > > + for (i = 0; i < rxq_number; i++) {
> > > > + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > > > + struct page_pool_stats pp_stats = {};
> > > > +
> > > > + if (!page_pool_get_stats(page_pool, &pp_stats))
> > > > + continue;
> > > > +
> > > > + stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
> > > > + stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
> > > > + stats->alloc_stats.slow_high_order +=
> > > > + pp_stats.alloc_stats.slow_high_order;
> > > > + stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
> > > > + stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
> > > > + stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
> > > > + stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
> > > > + stats->recycle_stats.cache_full +=
> > > > + pp_stats.recycle_stats.cache_full;
> > > > + stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
> > > > + stats->recycle_stats.ring_full +=
> > > > + pp_stats.recycle_stats.ring_full;
> > > > + stats->recycle_stats.released_refcnt +=
> > > > + pp_stats.recycle_stats.released_refcnt;
> > >
> > > Am i right in saying, these are all software stats? They are also
> > > generic for any receive queue using the page pool?
> >
> > yes, these stats are accounted by the kernel so they are sw stats, but I guess
> > xdp ones are sw as well, right?
> >
> > >
> > > It seems odd the driver is doing the addition here. Why not pass stats
> > > into page_pool_get_stats()? That will make it easier when you add
> > > additional statistics?
> > >
> > > I'm also wondering if ethtool -S is even the correct API. It should be
> > > for hardware dependent statistics, those which change between
> > > implementations. Where as these statistics should be generic. Maybe
> > > they should be in /sys/class/net/ethX/statistics/ and the driver
> > > itself is not even involved, the page pool code implements it?
> >
> > I do not have a strong opinion on it, but I can see an issue for some drivers
> > (e.g. mvpp2 iirc) where page_pools are not specific for each net_device but are shared
> > between multiple ports, so maybe it is better to allow the driver to decide how
> > to report them. What do you think?
>
> When I did the implementation of this API the feedback was essentially
> that the drivers should be responsible for reporting the stats of their
> active page_pool structures; this is why the first driver to use this
> (mlx5) uses the API and outputs the stats via ethtool -S.
>
> I have no strong preference, either, but I think that exposing the stats
> via an API for the drivers to consume is less tricky; the driver knows
> which page_pools are active and which pool is associated with which
> RX-queue, and so on.
>
> If there is general consensus for a different approach amongst the
> page_pool maintainers, I am happy to implement it.
If we keep it in the drivers, it would be good to try to move some of
the code into the core, to keep cut/paste to a minimum. We want the
same strings for every driver for example, and it looks like it is
going to be hard to add new counters, since you will need to touch
every driver using the page pool.
Maybe even consider adding ETH_SS_PAGE_POOL. You can then put
page_pool_get_sset_count() and page_pool_get_sset_strings() as helpers
in the core, and the driver just needs to implement the get_stats()
part, again with a helper in the core which can do most of the work.
Andrew
^ permalink raw reply [flat|nested] 9+ messages in thread* Re: [PATCH net-next] net: mvneta: add support for page_pool_get_stats
2022-04-06 23:36 ` Andrew Lunn
@ 2022-04-07 16:48 ` Lorenzo Bianconi
0 siblings, 0 replies; 9+ messages in thread
From: Lorenzo Bianconi @ 2022-04-07 16:48 UTC (permalink / raw)
To: Andrew Lunn
Cc: Joe Damato, netdev, lorenzo.bianconi, davem, kuba, pabeni,
thomas.petazzoni, linux, jbrouer, ilias.apalodimas
[-- Attachment #1: Type: text/plain, Size: 4139 bytes --]
> On Wed, Apr 06, 2022 at 04:01:37PM -0700, Joe Damato wrote:
> > On Wed, Apr 06, 2022 at 04:02:44PM +0200, Lorenzo Bianconi wrote:
> > > > > +static void mvneta_ethtool_update_pp_stats(struct mvneta_port *pp,
> > > > > + struct page_pool_stats *stats)
> > > > > +{
> > > > > + int i;
> > > > > +
> > > > > + memset(stats, 0, sizeof(*stats));
> > > > > + for (i = 0; i < rxq_number; i++) {
> > > > > + struct page_pool *page_pool = pp->rxqs[i].page_pool;
> > > > > + struct page_pool_stats pp_stats = {};
> > > > > +
> > > > > + if (!page_pool_get_stats(page_pool, &pp_stats))
> > > > > + continue;
> > > > > +
> > > > > + stats->alloc_stats.fast += pp_stats.alloc_stats.fast;
> > > > > + stats->alloc_stats.slow += pp_stats.alloc_stats.slow;
> > > > > + stats->alloc_stats.slow_high_order +=
> > > > > + pp_stats.alloc_stats.slow_high_order;
> > > > > + stats->alloc_stats.empty += pp_stats.alloc_stats.empty;
> > > > > + stats->alloc_stats.refill += pp_stats.alloc_stats.refill;
> > > > > + stats->alloc_stats.waive += pp_stats.alloc_stats.waive;
> > > > > + stats->recycle_stats.cached += pp_stats.recycle_stats.cached;
> > > > > + stats->recycle_stats.cache_full +=
> > > > > + pp_stats.recycle_stats.cache_full;
> > > > > + stats->recycle_stats.ring += pp_stats.recycle_stats.ring;
> > > > > + stats->recycle_stats.ring_full +=
> > > > > + pp_stats.recycle_stats.ring_full;
> > > > > + stats->recycle_stats.released_refcnt +=
> > > > > + pp_stats.recycle_stats.released_refcnt;
> > > >
> > > > Am i right in saying, these are all software stats? They are also
> > > > generic for any receive queue using the page pool?
> > >
> > > yes, these stats are accounted by the kernel so they are sw stats, but I guess
> > > xdp ones are sw as well, right?
> > >
> > > >
> > > > It seems odd the driver is doing the addition here. Why not pass stats
> > > > into page_pool_get_stats()? That will make it easier when you add
> > > > additional statistics?
> > > >
> > > > I'm also wondering if ethtool -S is even the correct API. It should be
> > > > for hardware dependent statistics, those which change between
> > > > implementations. Where as these statistics should be generic. Maybe
> > > > they should be in /sys/class/net/ethX/statistics/ and the driver
> > > > itself is not even involved, the page pool code implements it?
> > >
> > > I do not have a strong opinion on it, but I can see an issue for some drivers
> > > (e.g. mvpp2 iirc) where page_pools are not specific for each net_device but are shared
> > > between multiple ports, so maybe it is better to allow the driver to decide how
> > > to report them. What do you think?
> >
> > When I did the implementation of this API the feedback was essentially
> > that the drivers should be responsible for reporting the stats of their
> > active page_pool structures; this is why the first driver to use this
> > (mlx5) uses the API and outputs the stats via ethtool -S.
> >
> > I have no strong preference, either, but I think that exposing the stats
> > via an API for the drivers to consume is less tricky; the driver knows
> > which page_pools are active and which pool is associated with which
> > RX-queue, and so on.
> >
> > If there is general consensus for a different approach amongst the
> > page_pool maintainers, I am happy to implement it.
>
> If we keep it in the drivers, it would be good to try to move some of
> the code into the core, to keep cut/paste to a minimum. We want the
> same strings for every driver for example, and it looks like it is
> going to be hard to add new counters, since you will need to touch
> every driver using the page pool.
>
> Maybe even consider adding ETH_SS_PAGE_POOL. You can then put
> page_pool_get_sset_count() and page_pool_get_sset_strings() as helpers
> in the core, and the driver just needs to implement the get_stats()
> part, again with a helper in the core which can do most of the work.
ack, fine. I will share a RFC patch to continue the discussion.
Regards,
Lorenzo
>
> Andrew
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 228 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2022-04-07 16:48 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-04-05 20:32 [PATCH net-next] net: mvneta: add support for page_pool_get_stats Lorenzo Bianconi
2022-04-06 12:49 ` Andrew Lunn
2022-04-06 12:53 ` Ilias Apalodimas
2022-04-06 13:05 ` Lorenzo Bianconi
2022-04-06 13:38 ` Andrew Lunn
2022-04-06 14:02 ` Lorenzo Bianconi
2022-04-06 23:01 ` Joe Damato
2022-04-06 23:36 ` Andrew Lunn
2022-04-07 16:48 ` Lorenzo Bianconi
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).