public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
From: Ahmed Zaki <anzaki@gmail.com>
To: netfilter-devel@vger.kernel.org, pablo@netfilter.org, fw@strlen.de
Cc: coreteam@netfilter.org, netdev@vger.kernel.org
Subject: [PATCH nf-next 2/2] netfilter: flowtable: update netdev stats with HW_OFFLOAD flows
Date: Tue, 17 Mar 2026 17:48:51 -0600	[thread overview]
Message-ID: <20260317234851.234466-3-anzaki@gmail.com> (raw)
In-Reply-To: <20260317234851.234466-1-anzaki@gmail.com>

SNMP-based network monitoring systems (and maybe other tools) rely on
netdev stats to report the network traffic. We currently do not update
the netdev stats with the offloaded flows stats which creates
discrepancies in the stats with some of these devices inside a bigger
network.

Update the netdev stats with the hardware offloaded flows' stats. The
stats are updated periodically in flow_offload_work_stats() and also
once in flow_offload_work_del() before the flow is deleted. For this,
flow_offload_work_del() had to be moved below flow_offload_tuple_stats().

Signed-off-by: Ahmed Zaki <anzaki@gmail.com>
---
 net/netfilter/nf_flow_table_offload.c | 59 ++++++++++++++++++++++++---
 1 file changed, 53 insertions(+), 6 deletions(-)

diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index b2e4fb6fa011..fb325d4a1131 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -925,13 +925,41 @@ static void flow_offload_work_add(struct flow_offload_work *offload)
 	nf_flow_offload_destroy(flow_rule);
 }
 
-static void flow_offload_work_del(struct flow_offload_work *offload)
+static void flow_offload_netdev_update(struct flow_offload_work *offload,
+				       struct flow_stats *stats)
 {
-	clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
-	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
-	if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
-		flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
-	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
+	const struct flow_offload_tuple *tuple;
+	struct net_device *indev, *outdev;
+	struct net *net;
+
+	rcu_read_lock();
+	net = read_pnet(&offload->flowtable->net);
+	if (stats[0].pkts) {
+		tuple = &offload->flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple;
+		indev  = dev_get_by_index_rcu(net, tuple->iifidx);
+		if (indev)
+			dev_sw_netstats_rx_add(indev,
+					       stats[0].pkts, stats[0].bytes);
+
+		outdev = dev_get_by_index_rcu(net, tuple->out.ifidx);
+		if (outdev)
+			dev_sw_netstats_tx_add(outdev,
+					       stats[0].pkts, stats[0].bytes);
+	}
+
+	if (stats[1].pkts) {
+		tuple = &offload->flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple;
+		indev  = dev_get_by_index_rcu(net, tuple->iifidx);
+		if (indev)
+			dev_sw_netstats_rx_add(indev,
+					       stats[1].pkts, stats[1].bytes);
+
+		outdev = dev_get_by_index_rcu(net, tuple->out.ifidx);
+		if (outdev)
+			dev_sw_netstats_tx_add(outdev,
+					       stats[1].pkts, stats[1].bytes);
+	}
+	rcu_read_unlock();
 }
 
 static void flow_offload_tuple_stats(struct flow_offload_work *offload,
@@ -968,6 +996,25 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
 				       FLOW_OFFLOAD_DIR_REPLY,
 				       stats[1].pkts, stats[1].bytes);
 	}
+
+	flow_offload_netdev_update(offload, stats);
+}
+
+static void flow_offload_work_del(struct flow_offload_work *offload)
+{
+	struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
+
+	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
+	if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
+		flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY,
+					 &stats[1]);
+	flow_offload_netdev_update(offload, stats);
+
+	clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
+	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
+	if (test_bit(NF_FLOW_HW_BIDIRECTIONAL, &offload->flow->flags))
+		flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
+	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
 }
 
 static void flow_offload_work_handler(struct work_struct *work)
-- 
2.43.0


  parent reply	other threads:[~2026-03-17 23:49 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-17 23:48 [PATCH nf-next 0/2] Update netdev stats with offloaded flows Ahmed Zaki
2026-03-17 23:48 ` [PATCH nf-next 1/2] net: treewide: pass number of pkts to dev_sw_netstats_rx_add() Ahmed Zaki
2026-03-19  2:12   ` Jakub Kicinski
2026-03-17 23:48 ` Ahmed Zaki [this message]
2026-03-18  0:32   ` [PATCH nf-next 2/2] netfilter: flowtable: update netdev stats with HW_OFFLOAD flows bot+bpf-ci

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260317234851.234466-3-anzaki@gmail.com \
    --to=anzaki@gmail.com \
    --cc=coreteam@netfilter.org \
    --cc=fw@strlen.de \
    --cc=netdev@vger.kernel.org \
    --cc=netfilter-devel@vger.kernel.org \
    --cc=pablo@netfilter.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox