public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
From: Stephen Hemminger <stephen@networkplumber.org>
To: netdev@vger.kernel.org
Cc: jhs@mojatatu.com, jiri@resnulli.us,
	Stephen Hemminger <stephen@networkplumber.org>
Subject: [PATCH net-next v2 4/5] net/sched: netem: add per-impairment extended statistics
Date: Sun,  3 May 2026 12:52:02 -0700	[thread overview]
Message-ID: <20260503195348.521225-5-stephen@networkplumber.org> (raw)
In-Reply-To: <20260503195348.521225-1-stephen@networkplumber.org>

Add new counters that keep track of when netem applies
impairments (delay, loss, corruption, duplication, reordering,
ECN marking).
Add a struct tc_netem_xstats reported via TCA_STATS_APP so that
userspace (tc -s qdisc show) can display per-impairment counters.

Use the WRITE_ONCE/READ_ONCE pattern so the counters can be
read safely without taking the qdisc lock.

Accompanying iproute2 change is submitted separately.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 include/uapi/linux/pkt_sched.h |  9 ++++++
 net/sched/sch_netem.c          | 55 ++++++++++++++++++++++++++--------
 2 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 66e8072f44df..1c84c8076e22 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -569,6 +569,15 @@ struct tc_netem_gemodel {
 #define NETEM_DIST_SCALE	8192
 #define NETEM_DIST_MAX		16384
 
+struct tc_netem_xstats {
+	__u64	delayed;	/* packets delayed */
+	__u64	dropped;	/* packets dropped by loss model      */
+	__u64	corrupted;	/* packets with bit errors injected   */
+	__u64	duplicated;	/* duplicate packets generated        */
+	__u64	reordered;	/* packets sent out of order          */
+	__u64	ecn_marked;	/* packets ECN CE-marked (not dropped) */
+};
+
 /* DRR */
 
 enum {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 53961d1e70d7..e710898ce96e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -100,8 +100,7 @@ struct netem_sched_data {
 	s64			latency;
 	s64			jitter;
 	u64			rate;
-	u32			gap;
-	u32			loss;
+	u64			delayed;
 
 	/* Cacheline 1: zero-check scalars and correlation states. */
 	u32			duplicate;
@@ -112,7 +111,8 @@ struct netem_sched_data {
 		u32 last;
 		u32 rho;
 	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;
-	u8			loss_model;
+	u32			gap;
+	u32			loss;
 
 	/* Cacheline 2: PRNG, distribution tables, slot dequeue state etc. */
 	struct prng {
@@ -125,21 +125,27 @@ struct netem_sched_data {
 		s32 packets_left;
 		s32 bytes_left;
 	} slot;
-	struct disttable	*slot_dist;
 	struct Qdisc		*qdisc;
+	u8			loss_model;
 
 	/*
-	 * Warm: rate-shaping parameters (only read when rate != 0) and
-	 * configuration-only fields.  The fast path reads sch->limit, not
-	 * q->limit.
+	 * Rare-write impairment counters (read together by
+	 * netem_dump_stats) and rate-shaping parameters (only consulted
+	 * when rate != 0).  The fast path reads sch->limit, not q->limit.
 	 */
+	u64			dropped;
+	u64			corrupted;
+	u64			duplicated;
+	u64			ecn_marked;
+	u64			reordered;
 	s32			packet_overhead;
 	u32			cell_size;
 	struct reciprocal_value	cell_size_reciprocal;
 	s32			cell_overhead;
 	u32			limit;
 
-	/* Correlated Loss Generation models */
+	/* Cold tail: slot reschedule config and the watchdog timer. */
+	struct disttable	*slot_dist;
 	struct clgstate {
 		/* 4-states and Gilbert-Elliot models */
 		u32 a1;	/* p13 for 4-states or p for GE */
@@ -152,7 +158,6 @@ struct netem_sched_data {
 		u8  state;
 	} clg;
 
-	/* Cold tail: slot reschedule config and the watchdog timer. */
 	struct tc_netem_slot	slot_config;
 	struct qdisc_watchdog	watchdog;
 };
@@ -462,17 +467,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	skb->prev = NULL;
 
 	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) {
 		++count;
+		WRITE_ONCE(q->duplicated, q->duplicated + 1);
+	}
 
 	/* Drop packet? */
 	if (loss_event(q)) {
-		if (q->ecn && INET_ECN_set_ce(skb))
+		if (q->ecn && INET_ECN_set_ce(skb)) {
 			qdisc_qstats_drop(sch); /* mark packet */
-		else
+			WRITE_ONCE(q->ecn_marked, q->ecn_marked + 1);
+		} else {
 			--count;
+		}
 	}
+
 	if (count == 0) {
+		WRITE_ONCE(q->dropped, q->dropped + 1);
 		qdisc_qstats_drop(sch);
 		__qdisc_drop(skb, to_free);
 		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -498,6 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	 * do it now in software before we mangle it.
 	 */
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
+		WRITE_ONCE(q->corrupted, q->corrupted + 1);
 		if (skb_is_gso(skb)) {
 			skb = netem_segment(skb, sch, to_free);
 			if (!skb)
@@ -603,12 +615,15 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		cb->time_to_send = now + delay;
 		++q->counter;
+		WRITE_ONCE(q->delayed, q->delayed + 1);
+
 		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
 		 * of the queue.
 		 */
+		WRITE_ONCE(q->reordered, q->reordered + 1);
 		cb->time_to_send = ktime_get_ns();
 		q->counter = 0;
 
@@ -1348,6 +1363,21 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return -1;
 }
 
+static int netem_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct tc_netem_xstats st = {
+		.delayed    = READ_ONCE(q->delayed),
+		.dropped    = READ_ONCE(q->dropped),
+		.corrupted  = READ_ONCE(q->corrupted),
+		.duplicated = READ_ONCE(q->duplicated),
+		.reordered  = READ_ONCE(q->reordered),
+		.ecn_marked = READ_ONCE(q->ecn_marked),
+	};
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
 static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 			  struct sk_buff *skb, struct tcmsg *tcm)
 {
@@ -1410,6 +1440,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 	.destroy	=	netem_destroy,
 	.change		=	netem_change,
 	.dump		=	netem_dump,
+	.dump_stats	=	netem_dump_stats,
 	.owner		=	THIS_MODULE,
 };
 MODULE_ALIAS_NET_SCH("netem");
-- 
2.53.0


  parent reply	other threads:[~2026-05-03 19:53 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-03 19:51 [PATCH net-next v2 0/5] net/sched: netem: fixes and improvements Stephen Hemminger
2026-05-03 19:51 ` [PATCH net-next v2 1/5] net/sched: netem: reorder struct netem_sched_data Stephen Hemminger
2026-05-03 19:52 ` [PATCH net-next v2 2/5] net/sched: netem: remove useless VERSION Stephen Hemminger
2026-05-03 19:52 ` [PATCH net-next v2 3/5] net/sched: netem: replace pr_info with netlink extack error messages Stephen Hemminger
2026-05-03 19:52 ` Stephen Hemminger [this message]
2026-05-05  0:16   ` [PATCH net-next v2 4/5] net/sched: netem: add per-impairment extended statistics Stephen Hemminger
2026-05-03 19:52 ` [PATCH net-next v2 5/5] net/sched: netem: handle multi-segment skb in corruption Stephen Hemminger
2026-05-05  0:25   ` Stephen Hemminger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260503195348.521225-5-stephen@networkplumber.org \
    --to=stephen@networkplumber.org \
    --cc=jhs@mojatatu.com \
    --cc=jiri@resnulli.us \
    --cc=netdev@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox