From: Jeremy Kerr <jk@codeconstruct.com.au>
To: David Ahern <dsahern@kernel.org>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>,
Paolo Abeni <pabeni@redhat.com>
Cc: netdev@vger.kernel.org
Subject: [PATCH net-next v3 1/3] net: core,vrf: Change pcpu_dstats fields to u64_stats_t
Date: Fri, 07 Jun 2024 18:25:24 +0800
Message-ID: <20240607-dstats-v3-1-cc781fe116f7@codeconstruct.com.au>
In-Reply-To: <20240607-dstats-v3-0-cc781fe116f7@codeconstruct.com.au>
The pcpu_sw_netstats and pcpu_lstats structs both contain a set of
u64_stats_t fields for their individual stats, but pcpu_dstats uses
plain u64 fields instead.
Make this consistent by using u64_stats_t across all stats types.
The per-cpu dstats are only used by the vrf driver at present, so update
that driver as part of this change.
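For context, a minimal sketch of the writer/reader pattern that
u64_stats_t enables, using the same u64_stats_sync helpers the diff
below adopts. This is not part of the patch; the struct and function
names here are hypothetical, for illustration only:

#include <linux/u64_stats_sync.h>

struct pcpu_example_stats {
	u64_stats_t		packets;
	struct u64_stats_sync	syncp;
};

/* writer side: bracket updates with the syncp sequence counter */
static void example_count_packet(struct pcpu_example_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry the snapshot if a writer raced with us */
static u64 example_read_packets(struct pcpu_example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}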
Signed-off-by: Jeremy Kerr <jk@codeconstruct.com.au>
---
v2:
- use proper accessor in rx drop accounting
---
drivers/net/vrf.c | 38 ++++++++++++++++++++++----------------
include/linux/netdevice.h | 12 ++++++------
2 files changed, 28 insertions(+), 22 deletions(-)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3a252ac5dd28..5018831b2a79 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -126,8 +126,8 @@ static void vrf_rx_stats(struct net_device *dev, int len)
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->rx_packets++;
- dstats->rx_bytes += len;
+ u64_stats_inc(&dstats->rx_packets);
+ u64_stats_add(&dstats->rx_bytes, len);
u64_stats_update_end(&dstats->syncp);
}
@@ -150,11 +150,11 @@ static void vrf_get_stats64(struct net_device *dev,
dstats = per_cpu_ptr(dev->dstats, i);
do {
start = u64_stats_fetch_begin(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpkts = dstats->tx_packets;
- tdrops = dstats->tx_drops;
- rbytes = dstats->rx_bytes;
- rpkts = dstats->rx_packets;
+ tbytes = u64_stats_read(&dstats->tx_bytes);
+ tpkts = u64_stats_read(&dstats->tx_packets);
+ tdrops = u64_stats_read(&dstats->tx_drops);
+ rbytes = u64_stats_read(&dstats->rx_bytes);
+ rpkts = u64_stats_read(&dstats->rx_packets);
} while (u64_stats_fetch_retry(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
@@ -408,10 +408,15 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
vrf_rx_stats(dev, len);
- else
- this_cpu_inc(dev->dstats->rx_drops);
+ } else {
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_drops);
+ u64_stats_update_end(&dstats->syncp);
+ }
return NETDEV_TX_OK;
}
@@ -599,19 +604,20 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+ u64_stats_update_begin(&dstats->syncp);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_packets++;
- dstats->tx_bytes += len;
- u64_stats_update_end(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_add(&dstats->tx_bytes, len);
} else {
- this_cpu_inc(dev->dstats->tx_drops);
+ u64_stats_inc(&dstats->tx_drops);
}
+ u64_stats_update_end(&dstats->syncp);
return ret;
}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d20c6c99eb88..f148a01dd1d1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2731,12 +2731,12 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_dstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_drops;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_drops;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_drops;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_drops;
struct u64_stats_sync syncp;
} __aligned(8 * sizeof(u64));
--
2.39.2