* [PATCH net] net/sched: sch_red: annotate data-races in red_dump_stats()
@ 2026-04-21 14:23 Eric Dumazet
0 siblings, 0 replies; only message in thread
From: Eric Dumazet @ 2026-04-21 14:23 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko, netdev, eric.dumazet,
Eric Dumazet
red_dump_stats() runs with only RTNL held,
reading fields that can be changed in the qdisc fast path.
Add READ_ONCE()/WRITE_ONCE() annotations.
Alternative would be to acquire the qdisc spinlock, but our long-term
goal is to make qdisc dump operations lockless as much as we can.
tc_red_xstats fields don't need to be latched atomically;
otherwise, this bug would have been caught earlier.
Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
---
net/sched/sch_red.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index c8d3d09f15e3919d6468964561130bfc79fb215b..432b8a3000a57b4688b3ddb5501f604d5752c67c 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -90,17 +90,20 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
case RED_PROB_MARK:
qdisc_qstats_overlimit(sch);
if (!red_use_ecn(q)) {
- q->stats.prob_drop++;
+ WRITE_ONCE(q->stats.prob_drop,
+ q->stats.prob_drop + 1);
goto congestion_drop;
}
if (INET_ECN_set_ce(skb)) {
- q->stats.prob_mark++;
+ WRITE_ONCE(q->stats.prob_mark,
+ q->stats.prob_mark + 1);
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
if (!skb)
return NET_XMIT_CN | ret;
} else if (!red_use_nodrop(q)) {
- q->stats.prob_drop++;
+ WRITE_ONCE(q->stats.prob_drop,
+ q->stats.prob_drop + 1);
goto congestion_drop;
}
@@ -111,17 +114,20 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
reason = QDISC_DROP_OVERLIMIT;
qdisc_qstats_overlimit(sch);
if (red_use_harddrop(q) || !red_use_ecn(q)) {
- q->stats.forced_drop++;
+ WRITE_ONCE(q->stats.forced_drop,
+ q->stats.forced_drop + 1);
goto congestion_drop;
}
if (INET_ECN_set_ce(skb)) {
- q->stats.forced_mark++;
+ WRITE_ONCE(q->stats.forced_mark,
+ q->stats.forced_mark + 1);
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
if (!skb)
return NET_XMIT_CN | ret;
} else if (!red_use_nodrop(q)) {
- q->stats.forced_drop++;
+ WRITE_ONCE(q->stats.forced_drop,
+ q->stats.forced_drop + 1);
goto congestion_drop;
}
@@ -135,7 +141,8 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
sch->qstats.backlog += len;
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
- q->stats.pdrop++;
+ WRITE_ONCE(q->stats.pdrop,
+ q->stats.pdrop + 1);
qdisc_qstats_drop(sch);
}
return ret;
@@ -463,9 +470,13 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
&hw_stats_request);
}
- st.early = q->stats.prob_drop + q->stats.forced_drop;
- st.pdrop = q->stats.pdrop;
- st.marked = q->stats.prob_mark + q->stats.forced_mark;
+ st.early = READ_ONCE(q->stats.prob_drop) +
+ READ_ONCE(q->stats.forced_drop);
+
+ st.pdrop = READ_ONCE(q->stats.pdrop);
+
+ st.marked = READ_ONCE(q->stats.prob_mark) +
+ READ_ONCE(q->stats.forced_mark);
return gnet_stats_copy_app(d, &st, sizeof(st));
}
--
2.54.0.rc2.533.g4f5dca5207-goog
^ permalink raw reply related [flat|nested] only message in thread
only message in thread, other threads:[~2026-04-21 14:23 UTC | newest]
Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-21 14:23 [PATCH net] net/sched: sch_red: annotate data-races in red_dump_stats() Eric Dumazet
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox