* [PATCH net 1/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (I)
2026-04-23 10:23 [PATCH net 0/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (series) Eric Dumazet
@ 2026-04-23 10:23 ` Eric Dumazet
2026-04-23 12:08 ` Toke Høiland-Jørgensen
2026-04-23 10:23 ` [PATCH net 2/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (II) Eric Dumazet
` (3 subsequent siblings)
4 siblings, 1 reply; 11+ messages in thread
From: Eric Dumazet @ 2026-04-23 10:23 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko,
Toke Høiland-Jørgensen, netdev, eric.dumazet,
Eric Dumazet, Toke Høiland-Jørgensen
cake_dump_stats() runs without the qdisc spinlock being held.
In this first patch, I add READ_ONCE()/WRITE_ONCE() annotations
for the following fields:
- way_hits
- way_misses
- way_collisions
- sparse_flow_count
- decaying_flow_count
Other annotations are added in the following patches, to ease code review.
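For reference, the pattern used throughout the series looks roughly like
the following, shown here as a simplified userspace sketch: READ_ONCE()
and WRITE_ONCE() are stand-ins for the kernel macros from
<linux/compiler.h>, and the struct and function names are illustrative
only, not taken from sch_cake.c.

  #include <stdint.h>

  /* simplified stand-ins for the kernel annotations in <linux/compiler.h> */
  #define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
  #define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

  struct tin_stats {
          uint32_t way_hits;      /* written only with the qdisc lock held */
  };

  /* datapath: the qdisc lock is held, so the plain read of the old value
   * is safe; only the store is annotated, for the benefit of concurrent
   * lockless readers.
   */
  static void record_way_hit(struct tin_stats *b)
  {
          WRITE_ONCE(b->way_hits, b->way_hits + 1);
  }

  /* stats dump: runs without the qdisc lock, so the load is annotated too */
  static uint32_t dump_way_hits(struct tin_stats *b)
  {
          return READ_ONCE(b->way_hits);
  }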
Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
---
net/sched/sch_cake.c | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 02e1fa4577ae6089646e79b901b0d21945891855..bcc601fc486b13ab1609134f1ee29ebdcb9e8a06 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -813,7 +813,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (q->tags[outer_hash + k] == flow_hash) {
if (i)
- q->way_hits++;
+ WRITE_ONCE(q->way_hits, q->way_hits + 1);
if (!q->flows[outer_hash + k].set) {
/* need to increment host refcnts */
@@ -831,7 +831,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
for (i = 0; i < CAKE_SET_WAYS;
i++, k = (k + 1) % CAKE_SET_WAYS) {
if (!q->flows[outer_hash + k].set) {
- q->way_misses++;
+ WRITE_ONCE(q->way_misses, q->way_misses + 1);
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
goto found;
@@ -841,7 +841,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
/* With no empty queues, default to the original
* queue, accept the collision, update the host tags.
*/
- q->way_collisions++;
+ WRITE_ONCE(q->way_collisions, q->way_collisions + 1);
allocate_src = cake_dsrc(flow_mode);
allocate_dst = cake_ddst(flow_mode);
@@ -1917,11 +1917,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (!flow->set) {
list_add_tail(&flow->flowchain, &b->new_flows);
} else {
- b->decaying_flow_count--;
+ WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count - 1);
list_move_tail(&flow->flowchain, &b->new_flows);
}
flow->set = CAKE_SET_SPARSE;
- b->sparse_flow_count++;
+ WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count + 1);
flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode);
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
@@ -1929,7 +1929,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
* in the bulk rotation.
*/
flow->set = CAKE_SET_BULK;
- b->sparse_flow_count--;
+ WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
b->bulk_flow_count++;
cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
@@ -2149,7 +2149,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
*/
if (flow->set == CAKE_SET_SPARSE) {
if (flow->head) {
- b->sparse_flow_count--;
+ WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
b->bulk_flow_count++;
cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
@@ -2192,27 +2192,27 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
- b->decaying_flow_count++;
+ WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count + 1);
} else if (flow->set == CAKE_SET_SPARSE ||
flow->set == CAKE_SET_SPARSE_WAIT) {
- b->sparse_flow_count--;
- b->decaying_flow_count++;
+ WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
+ WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count + 1);
}
flow->set = CAKE_SET_DECAYING;
} else {
/* remove empty queue from the flowchain */
list_del_init(&flow->flowchain);
if (flow->set == CAKE_SET_SPARSE ||
- flow->set == CAKE_SET_SPARSE_WAIT)
- b->sparse_flow_count--;
- else if (flow->set == CAKE_SET_BULK) {
+ flow->set == CAKE_SET_SPARSE_WAIT) {
+ WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
+ } else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
- } else
- b->decaying_flow_count--;
-
+ } else {
+ WRITE_ONCE(b->decaying_flow_count, b->decaying_flow_count - 1);
+ }
flow->set = CAKE_SET_NONE;
}
goto begin;
@@ -3050,12 +3050,12 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
PUT_TSTAT_U32(BASE_DELAY_US,
ktime_to_us(ns_to_ktime(b->base_delay)));
- PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
- PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
- PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
+ PUT_TSTAT_U32(WAY_INDIRECT_HITS, READ_ONCE(b->way_hits));
+ PUT_TSTAT_U32(WAY_MISSES, READ_ONCE(b->way_misses));
+ PUT_TSTAT_U32(WAY_COLLISIONS, READ_ONCE(b->way_collisions));
- PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
- b->decaying_flow_count);
+ PUT_TSTAT_U32(SPARSE_FLOWS, READ_ONCE(b->sparse_flow_count) +
+ READ_ONCE(b->decaying_flow_count));
PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
--
2.54.0.rc2.544.gc7ae2d5bb8-goog
* Re: [PATCH net 1/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (I)
2026-04-23 10:23 ` [PATCH net 1/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (I) Eric Dumazet
@ 2026-04-23 12:08 ` Toke Høiland-Jørgensen
0 siblings, 0 replies; 11+ messages in thread
From: Toke Høiland-Jørgensen @ 2026-04-23 12:08 UTC (permalink / raw)
To: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko, netdev, eric.dumazet,
Eric Dumazet
Eric Dumazet <edumazet@google.com> writes:
> cake_dump_stats() runs without the qdisc spinlock being held.
>
> In this first patch, I add READ_ONCE()/WRITE_ONCE() annotations
> for the following fields:
>
> - way_hits
> - way_misses
> - way_collisions
> - sparse_flow_count
> - decaying_flow_count
>
> Other annotations are added in the following patches, to ease code review.
>
> Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
* [PATCH net 2/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (II)
2026-04-23 10:23 [PATCH net 0/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (series) Eric Dumazet
2026-04-23 10:23 ` [PATCH net 1/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (I) Eric Dumazet
@ 2026-04-23 10:23 ` Eric Dumazet
2026-04-23 12:10 ` Toke Høiland-Jørgensen
2026-04-23 10:23 ` [PATCH net 3/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (III) Eric Dumazet
` (2 subsequent siblings)
4 siblings, 1 reply; 11+ messages in thread
From: Eric Dumazet @ 2026-04-23 10:23 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko,
Toke Høiland-Jørgensen, netdev, eric.dumazet,
Eric Dumazet, Toke Høiland-Jørgensen
cake_dump_stats() runs without the qdisc spinlock being held.
In this second patch, I add READ_ONCE()/WRITE_ONCE() annotations
for the following fields:
- bulk_flow_count
- unresponsive_flow_count
- max_skblen
- flow_quantum
Other annotations are added in the following patches, to ease code review.
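Besides the plain counter annotations, this patch also reshuffles
cake_set_rate() so that flow_quantum is written only once per call, with
its final value, rather than first being set to the 1514-byte default and
then overwritten when a rate is configured. A sketch of the idea
(userspace model with illustrative names; WRITE_ONCE() stands in for the
kernel macro, and the clamping mirrors the max/min in the real code):

  #include <stdint.h>

  #define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

  struct tin_model {
          uint32_t flow_quantum;  /* read locklessly by the stats dump */
  };

  static void set_flow_quantum(struct tin_model *b, uint64_t rate_bps)
  {
          if (rate_bps) {
                  uint64_t quantum = rate_bps >> 12;

                  /* clamp to [300, 1514], as cake_set_rate() does */
                  if (quantum > 1514)
                          quantum = 1514;
                  if (quantum < 300)
                          quantum = 300;
                  WRITE_ONCE(b->flow_quantum, (uint32_t)quantum);
          } else {
                  /* unlimited, i.e. zero delay */
                  WRITE_ONCE(b->flow_quantum, 1514);
          }
  }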
Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
---
net/sched/sch_cake.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index bcc601fc486b13ab1609134f1ee29ebdcb9e8a06..d7465ee4c5507467effd5aaaf8cfd05bcafde2cf 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1590,7 +1590,8 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
}
if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
- b->unresponsive_flow_count++;
+ WRITE_ONCE(b->unresponsive_flow_count,
+ b->unresponsive_flow_count + 1);
len = qdisc_pkt_len(skb);
q->buffer_used -= skb->truesize;
@@ -1795,7 +1796,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
if (unlikely(len > b->max_skblen))
- b->max_skblen = len;
+ WRITE_ONCE(b->max_skblen, len);
if (qdisc_pkt_segs(skb) > 1 && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) {
struct sk_buff *segs, *nskb;
@@ -1930,7 +1931,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
*/
flow->set = CAKE_SET_BULK;
WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
- b->bulk_flow_count++;
+ WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count + 1);
cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
@@ -2150,7 +2151,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
if (flow->set == CAKE_SET_SPARSE) {
if (flow->head) {
WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
- b->bulk_flow_count++;
+ WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count + 1);
cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
@@ -2177,7 +2178,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
if (!skb) {
/* this queue was actually empty */
if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
- b->unresponsive_flow_count--;
+ WRITE_ONCE(b->unresponsive_flow_count,
+ b->unresponsive_flow_count - 1);
if (flow->cvars.p_drop || flow->cvars.count ||
ktime_before(now, flow->cvars.drop_next)) {
@@ -2187,7 +2189,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
list_move_tail(&flow->flowchain,
&b->decaying_flows);
if (flow->set == CAKE_SET_BULK) {
- b->bulk_flow_count--;
+ WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count - 1);
cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
@@ -2206,7 +2208,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
flow->set == CAKE_SET_SPARSE_WAIT) {
WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
} else if (flow->set == CAKE_SET_BULK) {
- b->bulk_flow_count--;
+ WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count - 1);
cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
@@ -2329,9 +2331,9 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
u8 rate_shft = 0;
u64 rate_ns = 0;
- b->flow_quantum = 1514;
if (rate) {
- b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
+ WRITE_ONCE(b->flow_quantum,
+ max(min(rate >> 12, 1514ULL), 300ULL));
rate_shft = 34;
rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
@@ -2339,8 +2341,10 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
rate_ns >>= 1;
rate_shft--;
}
- } /* else unlimited, ie. zero delay */
-
+ } else {
+ /* else unlimited, ie. zero delay */
+ WRITE_ONCE(b->flow_quantum, 1514);
+ }
b->tin_rate_bps = rate;
b->tin_rate_ns = rate_ns;
b->tin_rate_shft = rate_shft;
@@ -3056,11 +3060,11 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
PUT_TSTAT_U32(SPARSE_FLOWS, READ_ONCE(b->sparse_flow_count) +
READ_ONCE(b->decaying_flow_count));
- PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
- PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
- PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
+ PUT_TSTAT_U32(BULK_FLOWS, READ_ONCE(b->bulk_flow_count));
+ PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, READ_ONCE(b->unresponsive_flow_count));
+ PUT_TSTAT_U32(MAX_SKBLEN, READ_ONCE(b->max_skblen));
- PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
+ PUT_TSTAT_U32(FLOW_QUANTUM, READ_ONCE(b->flow_quantum));
nla_nest_end(d->skb, ts);
}
--
2.54.0.rc2.544.gc7ae2d5bb8-goog
* Re: [PATCH net 2/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (II)
2026-04-23 10:23 ` [PATCH net 2/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (II) Eric Dumazet
@ 2026-04-23 12:10 ` Toke Høiland-Jørgensen
0 siblings, 0 replies; 11+ messages in thread
From: Toke Høiland-Jørgensen @ 2026-04-23 12:10 UTC (permalink / raw)
To: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko, netdev, eric.dumazet,
Eric Dumazet
Eric Dumazet <edumazet@google.com> writes:
> cake_dump_stats() runs without the qdisc spinlock being held.
>
> In this second patch, I add READ_ONCE()/WRITE_ONCE() annotations
> for the following fields:
>
> - bulk_flow_count
> - unresponsive_flow_count
> - max_skblen
> - flow_quantum
>
> Other annotations are added in the following patches, to ease code review.
>
> Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
* [PATCH net 3/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (III)
2026-04-23 10:23 [PATCH net 0/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (series) Eric Dumazet
2026-04-23 10:23 ` [PATCH net 1/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (I) Eric Dumazet
2026-04-23 10:23 ` [PATCH net 2/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (II) Eric Dumazet
@ 2026-04-23 10:23 ` Eric Dumazet
2026-04-23 12:11 ` Toke Høiland-Jørgensen
2026-04-23 10:23 ` [PATCH net 4/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (IV) Eric Dumazet
2026-04-23 10:23 ` [PATCH net 5/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (V) Eric Dumazet
4 siblings, 1 reply; 11+ messages in thread
From: Eric Dumazet @ 2026-04-23 10:23 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko,
Toke Høiland-Jørgensen, netdev, eric.dumazet,
Eric Dumazet, Toke Høiland-Jørgensen
cake_dump_stats() runs without the qdisc spinlock being held.
In this third patch, I add READ_ONCE()/WRITE_ONCE() annotations
for the following fields:
- packets
- tin_dropped
- tin_ecn_mark
- ack_drops
- peak_delay
- avge_delay
- base_delay
Other annotations are added in the following patches, to ease code review.
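For the EWMA-smoothed delay fields the same rule applies: only the store
needs the annotation, because the read-modify-write happens on the locked
datapath and writers are therefore serialized. A compact userspace model
(the EWMA follows the shift-based form of cake_ewma(); struct and function
names here are illustrative, not the real ones):

  #include <stdint.h>

  #define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

  /* shift-based EWMA: a larger shift means slower adaptation */
  static uint64_t ewma(uint64_t avg, uint64_t sample, unsigned int shift)
  {
          avg -= avg >> shift;
          avg += sample >> shift;
          return avg;
  }

  struct delay_stats {
          uint64_t avge_delay;    /* ns, written under the qdisc lock */
  };

  /* dequeue path, lock held: the plain read of avge_delay feeding the
   * EWMA is safe; the result is published with WRITE_ONCE() for the
   * lockless stats dump.
   */
  static void update_avg_delay(struct delay_stats *b, uint64_t delay_ns)
  {
          WRITE_ONCE(b->avge_delay, ewma(b->avge_delay, delay_ns, 8));
  }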
Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
---
net/sched/sch_cake.c | 38 ++++++++++++++++++++------------------
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index d7465ee4c5507467effd5aaaf8cfd05bcafde2cf..c5aae31565e984e40937b55201b498174a37180e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1600,7 +1600,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
sch->qstats.backlog -= len;
flow->dropped++;
- b->tin_dropped++;
+ WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1);
if (q->config->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, skb, now, true);
@@ -1820,7 +1820,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
numsegs++;
slen += segs->len;
q->buffer_used += segs->truesize;
- b->packets++;
+ WRITE_ONCE(b->packets, b->packets + 1);
}
/* stats */
@@ -1844,7 +1844,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
ack = cake_ack_filter(q, flow);
if (ack) {
- b->ack_drops++;
+ WRITE_ONCE(b->ack_drops, b->ack_drops + 1);
sch->qstats.drops++;
ack_pkt_len = qdisc_pkt_len(ack);
b->bytes += ack_pkt_len;
@@ -1860,7 +1860,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
/* stats */
- b->packets++;
+ WRITE_ONCE(b->packets, b->packets + 1);
b->bytes += len - ack_pkt_len;
b->backlogs[idx] += len - ack_pkt_len;
b->tin_backlog += len - ack_pkt_len;
@@ -2236,7 +2236,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
b->tin_deficit -= len;
}
flow->dropped++;
- b->tin_dropped++;
+ WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
qdisc_dequeue_drop(sch, skb, reason);
@@ -2244,17 +2244,19 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
goto retry;
}
- b->tin_ecn_mark += !!flow->cvars.ecn_marked;
+ WRITE_ONCE(b->tin_ecn_mark, b->tin_ecn_mark + !!flow->cvars.ecn_marked);
qdisc_bstats_update(sch, skb);
WRITE_ONCE(q->last_active, now);
/* collect delay stats */
delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
- b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
- b->peak_delay = cake_ewma(b->peak_delay, delay,
- delay > b->peak_delay ? 2 : 8);
- b->base_delay = cake_ewma(b->base_delay, delay,
- delay < b->base_delay ? 2 : 8);
+ WRITE_ONCE(b->avge_delay, cake_ewma(b->avge_delay, delay, 8));
+ WRITE_ONCE(b->peak_delay,
+ cake_ewma(b->peak_delay, delay,
+ delay > b->peak_delay ? 2 : 8));
+ WRITE_ONCE(b->base_delay,
+ cake_ewma(b->base_delay, delay,
+ delay < b->base_delay ? 2 : 8));
len = cake_advance_shaper(q, b, skb, now, false);
flow->deficit -= len;
@@ -3042,17 +3044,17 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
PUT_TSTAT_U32(INTERVAL_US,
ktime_to_us(ns_to_ktime(b->cparams.interval)));
- PUT_TSTAT_U32(SENT_PACKETS, b->packets);
- PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
- PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
- PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
+ PUT_TSTAT_U32(SENT_PACKETS, READ_ONCE(b->packets));
+ PUT_TSTAT_U32(DROPPED_PACKETS, READ_ONCE(b->tin_dropped));
+ PUT_TSTAT_U32(ECN_MARKED_PACKETS, READ_ONCE(b->tin_ecn_mark));
+ PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, READ_ONCE(b->ack_drops));
PUT_TSTAT_U32(PEAK_DELAY_US,
- ktime_to_us(ns_to_ktime(b->peak_delay)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->peak_delay))));
PUT_TSTAT_U32(AVG_DELAY_US,
- ktime_to_us(ns_to_ktime(b->avge_delay)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->avge_delay))));
PUT_TSTAT_U32(BASE_DELAY_US,
- ktime_to_us(ns_to_ktime(b->base_delay)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->base_delay))));
PUT_TSTAT_U32(WAY_INDIRECT_HITS, READ_ONCE(b->way_hits));
PUT_TSTAT_U32(WAY_MISSES, READ_ONCE(b->way_misses));
--
2.54.0.rc2.544.gc7ae2d5bb8-goog
* Re: [PATCH net 3/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (III)
2026-04-23 10:23 ` [PATCH net 3/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (III) Eric Dumazet
@ 2026-04-23 12:11 ` Toke Høiland-Jørgensen
0 siblings, 0 replies; 11+ messages in thread
From: Toke Høiland-Jørgensen @ 2026-04-23 12:11 UTC (permalink / raw)
To: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko, netdev, eric.dumazet,
Eric Dumazet
Eric Dumazet <edumazet@google.com> writes:
> cake_dump_stats() runs without the qdisc spinlock being held.
>
> In this third patch, I add READ_ONCE()/WRITE_ONCE() annotations
> for the following fields:
>
> - packets
> - tin_dropped
> - tin_ecn_mark
> - ack_drops
> - peak_delay
> - avge_delay
> - base_delay
>
> Other annotations are added in the following patches, to ease code review.
>
> Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
* [PATCH net 4/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (IV)
2026-04-23 10:23 [PATCH net 0/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (series) Eric Dumazet
` (2 preceding siblings ...)
2026-04-23 10:23 ` [PATCH net 3/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (III) Eric Dumazet
@ 2026-04-23 10:23 ` Eric Dumazet
2026-04-23 12:12 ` Toke Høiland-Jørgensen
2026-04-23 10:23 ` [PATCH net 5/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (V) Eric Dumazet
4 siblings, 1 reply; 11+ messages in thread
From: Eric Dumazet @ 2026-04-23 10:23 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko,
Toke Høiland-Jørgensen, netdev, eric.dumazet,
Eric Dumazet, Toke Høiland-Jørgensen
cake_dump_stats() runs without the qdisc spinlock being held.
In this fourth patch, I add READ_ONCE()/WRITE_ONCE() annotations
for the following fields:
- avg_peak_bandwidth
- buffer_limit
- buffer_max_used
- avg_netoff
- max_netlen
- max_adjlen
- min_netlen
- min_adjlen
- active_queues
- tin_rate_bps
- bytes
- tin_backlog
Other annotations are added in the following patch, to ease code review.
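The cake_reconfigure() hunk also shows a second idiom: when a value is
derived in several steps, compute it in a local variable and publish only
the final result with a single WRITE_ONCE(), so a lockless reader never
observes an intermediate value. Schematically (illustrative names, not the
real clamping logic of cake_reconfigure()):

  #include <stdint.h>

  #define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

  struct sched_model {
          uint32_t buffer_limit;  /* read locklessly by cake_dump_stats() */
  };

  static void reconfigure_limit(struct sched_model *q, uint32_t configured,
                                uint32_t derived_from_rate, uint32_t cap)
  {
          uint32_t limit;

          if (configured)
                  limit = configured;
          else if (derived_from_rate)
                  limit = derived_from_rate;
          else
                  limit = ~0U;

          if (limit > cap)
                  limit = cap;

          /* one annotated store of the final value */
          WRITE_ONCE(q->buffer_limit, limit);
  }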
Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
---
net/sched/sch_cake.c | 90 ++++++++++++++++++++++----------------------
1 file changed, 46 insertions(+), 44 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c5aae31565e984e40937b55201b498174a37180e..c3b09f67f0fdbc51d23b3d22df9ab89a716c7e2b 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1379,9 +1379,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
len -= off;
if (qd->max_netlen < len)
- qd->max_netlen = len;
+ WRITE_ONCE(qd->max_netlen, len);
if (qd->min_netlen > len)
- qd->min_netlen = len;
+ WRITE_ONCE(qd->min_netlen, len);
len += q->rate_overhead;
@@ -1401,9 +1401,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
}
if (qd->max_adjlen < len)
- qd->max_adjlen = len;
+ WRITE_ONCE(qd->max_adjlen, len);
if (qd->min_adjlen > len)
- qd->min_adjlen = len;
+ WRITE_ONCE(qd->min_adjlen, len);
return len;
}
@@ -1416,7 +1416,7 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
u16 segs = qdisc_pkt_segs(skb);
u32 len = qdisc_pkt_len(skb);
- q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
+ WRITE_ONCE(q->avg_netoff, cake_ewma(q->avg_netoff, off << 16, 8));
if (segs == 1)
return cake_calc_overhead(q, len, off);
@@ -1596,7 +1596,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
len = qdisc_pkt_len(skb);
q->buffer_used -= skb->truesize;
b->backlogs[idx] -= len;
- b->tin_backlog -= len;
+ WRITE_ONCE(b->tin_backlog, b->tin_backlog - len);
sch->qstats.backlog -= len;
flow->dropped++;
@@ -1824,9 +1824,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
/* stats */
- b->bytes += slen;
+ WRITE_ONCE(b->bytes, b->bytes + slen);
b->backlogs[idx] += slen;
- b->tin_backlog += slen;
+ WRITE_ONCE(b->tin_backlog, b->tin_backlog + slen);
sch->qstats.backlog += slen;
q->avg_window_bytes += slen;
@@ -1847,7 +1847,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
WRITE_ONCE(b->ack_drops, b->ack_drops + 1);
sch->qstats.drops++;
ack_pkt_len = qdisc_pkt_len(ack);
- b->bytes += ack_pkt_len;
+ WRITE_ONCE(b->bytes, b->bytes + ack_pkt_len);
q->buffer_used += skb->truesize - ack->truesize;
if (q->config->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, ack, now, true);
@@ -1861,9 +1861,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* stats */
WRITE_ONCE(b->packets, b->packets + 1);
- b->bytes += len - ack_pkt_len;
+ WRITE_ONCE(b->bytes, b->bytes + len - ack_pkt_len);
b->backlogs[idx] += len - ack_pkt_len;
- b->tin_backlog += len - ack_pkt_len;
+ WRITE_ONCE(b->tin_backlog, b->tin_backlog + len - ack_pkt_len);
sch->qstats.backlog += len - ack_pkt_len;
q->avg_window_bytes += len - ack_pkt_len;
}
@@ -1895,9 +1895,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
b = div64_u64(b, window_interval);
- q->avg_peak_bandwidth =
- cake_ewma(q->avg_peak_bandwidth, b,
- b > q->avg_peak_bandwidth ? 2 : 8);
+ WRITE_ONCE(q->avg_peak_bandwidth,
+ cake_ewma(q->avg_peak_bandwidth, b,
+ b > q->avg_peak_bandwidth ? 2 : 8));
q->avg_window_bytes = 0;
q->avg_window_begin = now;
@@ -1938,7 +1938,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
if (q->buffer_used > q->buffer_max_used)
- q->buffer_max_used = q->buffer_used;
+ WRITE_ONCE(q->buffer_max_used, q->buffer_used);
if (q->buffer_used <= q->buffer_limit)
return NET_XMIT_SUCCESS;
@@ -1978,7 +1978,7 @@ static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
skb = dequeue_head(flow);
len = qdisc_pkt_len(skb);
b->backlogs[q->cur_flow] -= len;
- b->tin_backlog -= len;
+ WRITE_ONCE(b->tin_backlog, b->tin_backlog - len);
sch->qstats.backlog -= len;
q->buffer_used -= skb->truesize;
sch->q.qlen--;
@@ -2043,7 +2043,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
cake_configure_rates(sch, new_rate, true);
q->last_checked_active = now;
- q->active_queues = num_active_qs;
+ WRITE_ONCE(q->active_queues, num_active_qs);
}
begin:
@@ -2347,7 +2347,7 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
/* else unlimited, ie. zero delay */
WRITE_ONCE(b->flow_quantum, 1514);
}
- b->tin_rate_bps = rate;
+ WRITE_ONCE(b->tin_rate_bps, rate);
b->tin_rate_ns = rate_ns;
b->tin_rate_shft = rate_shft;
@@ -2617,25 +2617,27 @@ static void cake_reconfigure(struct Qdisc *sch)
{
struct cake_sched_data *qd = qdisc_priv(sch);
struct cake_sched_config *q = qd->config;
+ u32 buffer_limit;
cake_configure_rates(sch, qd->config->rate_bps, false);
if (q->buffer_config_limit) {
- qd->buffer_limit = q->buffer_config_limit;
+ buffer_limit = q->buffer_config_limit;
} else if (q->rate_bps) {
u64 t = q->rate_bps * q->interval;
do_div(t, USEC_PER_SEC / 4);
- qd->buffer_limit = max_t(u32, t, 4U << 20);
+ buffer_limit = max_t(u32, t, 4U << 20);
} else {
- qd->buffer_limit = ~0;
+ buffer_limit = ~0;
}
sch->flags &= ~TCQ_F_CAN_BYPASS;
- qd->buffer_limit = min(qd->buffer_limit,
- max(sch->limit * psched_mtu(qdisc_dev(sch)),
- q->buffer_config_limit));
+ WRITE_ONCE(qd->buffer_limit,
+ min(buffer_limit,
+ max(sch->limit * psched_mtu(qdisc_dev(sch)),
+ q->buffer_config_limit)));
}
static int cake_config_change(struct cake_sched_config *q, struct nlattr *opt,
@@ -2780,10 +2782,10 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
return ret;
if (overhead_changed) {
- qd->max_netlen = 0;
- qd->max_adjlen = 0;
- qd->min_netlen = ~0;
- qd->min_adjlen = ~0;
+ WRITE_ONCE(qd->max_netlen, 0);
+ WRITE_ONCE(qd->max_adjlen, 0);
+ WRITE_ONCE(qd->min_netlen, ~0);
+ WRITE_ONCE(qd->min_adjlen, ~0);
}
if (qd->tins) {
@@ -3001,15 +3003,15 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
goto nla_put_failure; \
} while (0)
- PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
- PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
- PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
- PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
- PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
- PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
- PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
- PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
- PUT_STAT_U32(ACTIVE_QUEUES, q->active_queues);
+ PUT_STAT_U64(CAPACITY_ESTIMATE64, READ_ONCE(q->avg_peak_bandwidth));
+ PUT_STAT_U32(MEMORY_LIMIT, READ_ONCE(q->buffer_limit));
+ PUT_STAT_U32(MEMORY_USED, READ_ONCE(q->buffer_max_used));
+ PUT_STAT_U32(AVG_NETOFF, ((READ_ONCE(q->avg_netoff) + 0x8000) >> 16));
+ PUT_STAT_U32(MAX_NETLEN, READ_ONCE(q->max_netlen));
+ PUT_STAT_U32(MAX_ADJLEN, READ_ONCE(q->max_adjlen));
+ PUT_STAT_U32(MIN_NETLEN, READ_ONCE(q->min_netlen));
+ PUT_STAT_U32(MIN_ADJLEN, READ_ONCE(q->min_adjlen));
+ PUT_STAT_U32(ACTIVE_QUEUES, READ_ONCE(q->active_queues));
#undef PUT_STAT_U32
#undef PUT_STAT_U64
@@ -3035,9 +3037,9 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
if (!ts)
goto nla_put_failure;
- PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
- PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
- PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
+ PUT_TSTAT_U64(THRESHOLD_RATE64, READ_ONCE(b->tin_rate_bps));
+ PUT_TSTAT_U64(SENT_BYTES64, READ_ONCE(b->bytes));
+ PUT_TSTAT_U32(BACKLOG_BYTES, READ_ONCE(b->tin_backlog));
PUT_TSTAT_U32(TARGET_US,
ktime_to_us(ns_to_ktime(b->cparams.target)));
@@ -3304,10 +3306,10 @@ static int cake_mq_change(struct Qdisc *sch, struct nlattr *opt,
struct cake_sched_data *qd = qdisc_priv(chld);
if (overhead_changed) {
- qd->max_netlen = 0;
- qd->max_adjlen = 0;
- qd->min_netlen = ~0;
- qd->min_adjlen = ~0;
+ WRITE_ONCE(qd->max_netlen, 0);
+ WRITE_ONCE(qd->max_adjlen, 0);
+ WRITE_ONCE(qd->min_netlen, ~0);
+ WRITE_ONCE(qd->min_adjlen, ~0);
}
if (qd->tins) {
--
2.54.0.rc2.544.gc7ae2d5bb8-goog
* Re: [PATCH net 4/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (IV)
2026-04-23 10:23 ` [PATCH net 4/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (IV) Eric Dumazet
@ 2026-04-23 12:12 ` Toke Høiland-Jørgensen
0 siblings, 0 replies; 11+ messages in thread
From: Toke Høiland-Jørgensen @ 2026-04-23 12:12 UTC (permalink / raw)
To: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko, netdev, eric.dumazet,
Eric Dumazet
Eric Dumazet <edumazet@google.com> writes:
> cake_dump_stats() runs without the qdisc spinlock being held.
>
> In this fourth patch, I add READ_ONCE()/WRITE_ONCE() annotations
> for the following fields:
>
> - avg_peak_bandwidth
> - buffer_limit
> - buffer_max_used
> - avg_netoff
> - max_netlen
> - max_adjlen
> - min_netlen
> - min_adjlen
> - active_queues
> - tin_rate_bps
> - bytes
> - tin_backlog
>
> Other annotations are added in the following patch, to ease code review.
>
> Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
> ---
> net/sched/sch_cake.c | 90 ++++++++++++++++++++++----------------------
> 1 file changed, 46 insertions(+), 44 deletions(-)
>
> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
> index c5aae31565e984e40937b55201b498174a37180e..c3b09f67f0fdbc51d23b3d22df9ab89a716c7e2b 100644
> --- a/net/sched/sch_cake.c
> +++ b/net/sched/sch_cake.c
> @@ -1379,9 +1379,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
> len -= off;
>
> if (qd->max_netlen < len)
> - qd->max_netlen = len;
> + WRITE_ONCE(qd->max_netlen, len);
> if (qd->min_netlen > len)
> - qd->min_netlen = len;
> + WRITE_ONCE(qd->min_netlen, len);
>
> len += q->rate_overhead;
>
> @@ -1401,9 +1401,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
> }
>
> if (qd->max_adjlen < len)
> - qd->max_adjlen = len;
> + WRITE_ONCE(qd->max_adjlen, len);
> if (qd->min_adjlen > len)
> - qd->min_adjlen = len;
> + WRITE_ONCE(qd->min_adjlen, len);
>
> return len;
> }
> @@ -1416,7 +1416,7 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
> u16 segs = qdisc_pkt_segs(skb);
> u32 len = qdisc_pkt_len(skb);
>
> - q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
> + WRITE_ONCE(q->avg_netoff, cake_ewma(q->avg_netoff, off << 16, 8));
>
> if (segs == 1)
> return cake_calc_overhead(q, len, off);
> @@ -1596,7 +1596,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
> len = qdisc_pkt_len(skb);
> q->buffer_used -= skb->truesize;
> b->backlogs[idx] -= len;
> - b->tin_backlog -= len;
> + WRITE_ONCE(b->tin_backlog, b->tin_backlog - len);
> sch->qstats.backlog -= len;
>
> flow->dropped++;
> @@ -1824,9 +1824,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
> }
>
> /* stats */
> - b->bytes += slen;
> + WRITE_ONCE(b->bytes, b->bytes + slen);
> b->backlogs[idx] += slen;
> - b->tin_backlog += slen;
> + WRITE_ONCE(b->tin_backlog, b->tin_backlog + slen);
> sch->qstats.backlog += slen;
> q->avg_window_bytes += slen;
nit: these break up the aligned block, which hurts readability, IMO. Can
we keep the WRITE_ONCE() lines at the top or the bottom of the block?
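E.g., for the first hunk, keeping them at the top would be something like
this (just a sketch of one possible ordering):

  /* stats */
  WRITE_ONCE(b->bytes, b->bytes + slen);
  WRITE_ONCE(b->tin_backlog, b->tin_backlog + slen);
  b->backlogs[idx] += slen;
  sch->qstats.backlog += slen;
  q->avg_window_bytes += slen;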
> @@ -1847,7 +1847,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
> WRITE_ONCE(b->ack_drops, b->ack_drops + 1);
> sch->qstats.drops++;
> ack_pkt_len = qdisc_pkt_len(ack);
> - b->bytes += ack_pkt_len;
> + WRITE_ONCE(b->bytes, b->bytes + ack_pkt_len);
> q->buffer_used += skb->truesize - ack->truesize;
> if (q->config->rate_flags & CAKE_FLAG_INGRESS)
> cake_advance_shaper(q, b, ack, now, true);
> @@ -1861,9 +1861,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>
> /* stats */
> WRITE_ONCE(b->packets, b->packets + 1);
> - b->bytes += len - ack_pkt_len;
> + WRITE_ONCE(b->bytes, b->bytes + len - ack_pkt_len);
> b->backlogs[idx] += len - ack_pkt_len;
> - b->tin_backlog += len - ack_pkt_len;
> + WRITE_ONCE(b->tin_backlog, b->tin_backlog + len - ack_pkt_len);
> sch->qstats.backlog += len - ack_pkt_len;
> q->avg_window_bytes += len - ack_pkt_len;
same here
> }
> @@ -1895,9 +1895,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
> u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
>
> b = div64_u64(b, window_interval);
> - q->avg_peak_bandwidth =
> - cake_ewma(q->avg_peak_bandwidth, b,
> - b > q->avg_peak_bandwidth ? 2 : 8);
> + WRITE_ONCE(q->avg_peak_bandwidth,
> + cake_ewma(q->avg_peak_bandwidth, b,
> + b > q->avg_peak_bandwidth ? 2 : 8));
> q->avg_window_bytes = 0;
> q->avg_window_begin = now;
>
> @@ -1938,7 +1938,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
> }
>
> if (q->buffer_used > q->buffer_max_used)
> - q->buffer_max_used = q->buffer_used;
> + WRITE_ONCE(q->buffer_max_used, q->buffer_used);
>
> if (q->buffer_used <= q->buffer_limit)
> return NET_XMIT_SUCCESS;
> @@ -1978,7 +1978,7 @@ static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
> skb = dequeue_head(flow);
> len = qdisc_pkt_len(skb);
> b->backlogs[q->cur_flow] -= len;
> - b->tin_backlog -= len;
> + WRITE_ONCE(b->tin_backlog, b->tin_backlog - len);
> sch->qstats.backlog -= len;
> q->buffer_used -= skb->truesize;
> sch->q.qlen--;
and here
-Toke
* [PATCH net 5/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (V)
2026-04-23 10:23 [PATCH net 0/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (series) Eric Dumazet
` (3 preceding siblings ...)
2026-04-23 10:23 ` [PATCH net 4/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (IV) Eric Dumazet
@ 2026-04-23 10:23 ` Eric Dumazet
2026-04-23 12:12 ` Toke Høiland-Jørgensen
4 siblings, 1 reply; 11+ messages in thread
From: Eric Dumazet @ 2026-04-23 10:23 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko,
Toke Høiland-Jørgensen, netdev, eric.dumazet,
Eric Dumazet, Toke Høiland-Jørgensen
cake_dump_stats() runs without the qdisc spinlock being held.
In this final patch, I add READ_ONCE()/WRITE_ONCE() annotations
for cparams.target and cparams.interval.
Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
---
net/sched/sch_cake.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c3b09f67f0fdbc51d23b3d22df9ab89a716c7e2b..53d90062c32fcf647e4837416e335f4562ab97d9 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2356,10 +2356,11 @@ static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
byte_target_ns = (byte_target * rate_ns) >> rate_shft;
- b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
- b->cparams.interval = max(rtt_est_ns +
- b->cparams.target - target_ns,
- b->cparams.target * 2);
+ WRITE_ONCE(b->cparams.target,
+ max((byte_target_ns * 3) / 2, target_ns));
+ WRITE_ONCE(b->cparams.interval,
+ max(rtt_est_ns + b->cparams.target - target_ns,
+ b->cparams.target * 2));
b->cparams.mtu_time = byte_target_ns;
b->cparams.p_inc = 1 << 24; /* 1/256 */
b->cparams.p_dec = 1 << 20; /* 1/4096 */
@@ -3042,9 +3043,9 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
PUT_TSTAT_U32(BACKLOG_BYTES, READ_ONCE(b->tin_backlog));
PUT_TSTAT_U32(TARGET_US,
- ktime_to_us(ns_to_ktime(b->cparams.target)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->cparams.target))));
PUT_TSTAT_U32(INTERVAL_US,
- ktime_to_us(ns_to_ktime(b->cparams.interval)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->cparams.interval))));
PUT_TSTAT_U32(SENT_PACKETS, READ_ONCE(b->packets));
PUT_TSTAT_U32(DROPPED_PACKETS, READ_ONCE(b->tin_dropped));
--
2.54.0.rc2.544.gc7ae2d5bb8-goog
* Re: [PATCH net 5/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (V)
2026-04-23 10:23 ` [PATCH net 5/5] net/sched: sch_cake: annotate data-races in cake_dump_stats() (V) Eric Dumazet
@ 2026-04-23 12:12 ` Toke Høiland-Jørgensen
0 siblings, 0 replies; 11+ messages in thread
From: Toke Høiland-Jørgensen @ 2026-04-23 12:12 UTC (permalink / raw)
To: Eric Dumazet, David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Jamal Hadi Salim, Jiri Pirko, netdev, eric.dumazet,
Eric Dumazet
Eric Dumazet <edumazet@google.com> writes:
> cake_dump_stats() runs without the qdisc spinlock being held.
>
> In this final patch, I add READ_ONCE()/WRITE_ONCE() annotations
> for cparams.target and cparams.interval.
>
> Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>