Netdev List
 help / color / mirror / Atom feed
From: Stephen Hemminger <stephen@networkplumber.org>
To: netdev@vger.kernel.org
Cc: jhs@mojatatu.com, Stephen Hemminger <stephen@networkplumber.org>,
	Jiri Pirko <jiri@resnulli.us>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Simon Horman <horms@kernel.org>,
	linux-kernel@vger.kernel.org (open list)
Subject: [PATCH net-next v5 5/5] net/sched: netem: add per-impairment extended statistics
Date: Sat,  9 May 2026 10:03:26 -0700	[thread overview]
Message-ID: <20260509171123.307549-6-stephen@networkplumber.org> (raw)
In-Reply-To: <20260509171123.307549-1-stephen@networkplumber.org>

Add 64-bit counters for each impairment netem applies (delay, loss,
ECN marking, corruption, duplication, reordering) and for skb
allocation failures during enqueue. Exposed through TCA_STATS_APP
as struct tc_netem_xstats.

Counters increment when an impairment occurs, independent of later
events that may mask its on-wire effect. Added allocation_errors
(similar to sch_fq) to account for cases where an impairment could not
be applied due to memory pressure, etc.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---

Note to reviewers. The READ_ONCE/WRITE_ONCE pattern is to
align with upcoming changes removing qdisc_lock.
For some reason current AI prompts are obsessed with complaining
about 64-bit torn reads/writes on these; since the counters are
informational only, any such complaints are false positives.

Addition to iproute2 will be sent separately.

 include/uapi/linux/pkt_sched.h | 10 +++++++
 net/sched/sch_netem.c          | 55 ++++++++++++++++++++++++++++++----
 2 files changed, 59 insertions(+), 6 deletions(-)

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 66e8072f44df..490efd288526 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -569,6 +569,16 @@ struct tc_netem_gemodel {
 #define NETEM_DIST_SCALE	8192
 #define NETEM_DIST_MAX		16384
 
+struct tc_netem_xstats {
+	__u64	delayed;	/* packets delayed */
+	__u64	dropped;	/* packets dropped by loss model      */
+	__u64	corrupted;	/* packets with bit errors injected   */
+	__u64	duplicated;	/* duplicate packets generated        */
+	__u64	reordered;	/* packets sent out of order          */
+	__u64	ecn_marked;	/* packets ECN CE-marked (not dropped)*/
+	__u64	allocation_errors;
+};
+
 /* DRR */
 
 enum {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 699c734e4c8b..2433295c3920 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -152,6 +152,15 @@ struct netem_sched_data {
 		u8  state;
 	} clg;
 
+	/* Impairment counters */
+	u64			delayed;
+	u64			dropped;
+	u64			corrupted;
+	u64			duplicated;
+	u64			ecn_marked;
+	u64			reordered;
+	u64			allocation_errors;
+
 	/* Cold tail: slot reschedule config and the watchdog timer. */
 	struct tc_netem_slot	slot_config;
 	struct qdisc_watchdog	watchdog;
@@ -462,16 +471,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	skb->prev = NULL;
 
 	/* Random duplication */
-	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
+	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) {
 		++count;
+		WRITE_ONCE(q->duplicated, q->duplicated + 1);
+	}
 
 	/* Drop packet? */
 	if (loss_event(q)) {
-		if (q->ecn && INET_ECN_set_ce(skb))
-			qdisc_qstats_drop(sch); /* mark packet */
-		else
+		if (q->ecn && INET_ECN_set_ce(skb)) {
+			WRITE_ONCE(q->ecn_marked, q->ecn_marked + 1);
+		} else {
+			WRITE_ONCE(q->dropped, q->dropped + 1);
 			--count;
+		}
 	}
+
 	if (count == 0) {
 		qdisc_qstats_drop(sch);
 		__qdisc_drop(skb, to_free);
@@ -488,8 +502,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	 * If we need to duplicate packet, then clone it before
 	 * original is modified.
 	 */
-	if (count > 1)
+	if (count > 1) {
 		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2)
+			WRITE_ONCE(q->allocation_errors, q->allocation_errors + 1);
+	}
 
 	/*
 	 * Randomized packet corruption.
@@ -500,8 +517,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
 		if (skb_is_gso(skb)) {
 			skb = netem_segment(skb, sch, to_free);
-			if (!skb)
+			if (!skb) {
+				WRITE_ONCE(q->allocation_errors, q->allocation_errors + 1);
 				goto finish_segs;
+			}
 
 			segs = skb->next;
 			skb_mark_not_on_list(skb);
@@ -510,11 +529,13 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (unlikely(!skb)) {
+			WRITE_ONCE(q->allocation_errors, q->allocation_errors + 1);
 			qdisc_qstats_drop(sch);
 			goto finish_segs;
 		}
 		if (skb_linearize(skb) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))) {
+			WRITE_ONCE(q->allocation_errors, q->allocation_errors + 1);
 			qdisc_drop(skb, sch, to_free);
 			skb = NULL;
 			goto finish_segs;
@@ -523,6 +544,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		if (skb->len) {
 			u32 offset = get_random_u32_below(skb->len);
 			skb->data[offset] ^= 1 << get_random_u32_below(8);
+			WRITE_ONCE(q->corrupted, q->corrupted + 1);
 		}
 	}
 
@@ -604,12 +626,16 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
 		cb->time_to_send = now + delay;
 		++q->counter;
+		if (delay)
+			WRITE_ONCE(q->delayed, q->delayed + 1);
+
 		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
 		 * of the queue.
 		 */
+		WRITE_ONCE(q->reordered, q->reordered + 1);
 		cb->time_to_send = ktime_get_ns();
 		q->counter = 0;
 
@@ -1348,6 +1374,22 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return -1;
 }
 
+static int netem_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct netem_sched_data *q = qdisc_priv(sch);
+	struct tc_netem_xstats st = {
+		.delayed    = READ_ONCE(q->delayed),
+		.dropped    = READ_ONCE(q->dropped),
+		.corrupted  = READ_ONCE(q->corrupted),
+		.duplicated = READ_ONCE(q->duplicated),
+		.reordered  = READ_ONCE(q->reordered),
+		.ecn_marked = READ_ONCE(q->ecn_marked),
+		.allocation_errors = READ_ONCE(q->allocation_errors),
+	};
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
 static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 			  struct sk_buff *skb, struct tcmsg *tcm)
 {
@@ -1410,6 +1452,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 	.destroy	=	netem_destroy,
 	.change		=	netem_change,
 	.dump		=	netem_dump,
+	.dump_stats	=	netem_dump_stats,
 	.owner		=	THIS_MODULE,
 };
 MODULE_ALIAS_NET_SCH("netem");
-- 
2.53.0


      parent reply	other threads:[~2026-05-09 17:11 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-05-09 17:03 [PATCH net-next v5 0/5] net/sched: netem: enhancements Stephen Hemminger
2026-05-09 17:03 ` [PATCH net-next v5 1/5] net/sched: netem: reorder struct netem_sched_data Stephen Hemminger
2026-05-09 17:03 ` [PATCH net-next v5 2/5] net/sched: netem: remove useless VERSION Stephen Hemminger
2026-05-09 17:03 ` [PATCH net-next v5 3/5] net/sched: netem: replace pr_info with netlink extack error messages Stephen Hemminger
2026-05-09 17:03 ` [PATCH net-next v5 4/5] net/sched: netem: handle multi-segment skb in corruption Stephen Hemminger
2026-05-09 17:03 ` Stephen Hemminger [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260509171123.307549-6-stephen@networkplumber.org \
    --to=stephen@networkplumber.org \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=horms@kernel.org \
    --cc=jhs@mojatatu.com \
    --cc=jiri@resnulli.us \
    --cc=kuba@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox