public inbox for netdev@vger.kernel.org
 help / color / mirror / Atom feed
From: "Toke Høiland-Jørgensen" <toke@redhat.com>
To: Eric Dumazet <edumazet@google.com>,
	"David S . Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: Simon Horman <horms@kernel.org>,
	Jamal Hadi Salim <jhs@mojatatu.com>,
	Jiri Pirko <jiri@resnulli.us>,
	netdev@vger.kernel.org, eric.dumazet@gmail.com,
	Eric Dumazet <edumazet@google.com>
Subject: Re: [PATCH v3 net-next 13/15] net/sched: sch_cake: annotate data-races in cake_dump_stats()
Date: Mon, 13 Apr 2026 14:07:25 +0200	[thread overview]
Message-ID: <87se8zcbcy.fsf@toke.dk> (raw)
In-Reply-To: <20260410182257.774311-14-edumazet@google.com>

Eric Dumazet <edumazet@google.com> writes:

> cake_dump_stats() and cake_dump_class_stats() run without qdisc
> spinlock being held.
>
> Add READ_ONCE()/WRITE_ONCE() annotations.
>
> Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: "Toke Høiland-Jørgensen" <toke@toke.dk>
> ---
>  net/sched/sch_cake.c | 404 ++++++++++++++++++++++++-------------------
>  1 file changed, 225 insertions(+), 179 deletions(-)

One of these diffstats is not like the others - thanks for tackling this :)

A few nits below:

> diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
> index 32e672820c00a88c6d8fe77a6308405e016525ea..f523f0aa4d830e9d3ec4d43bb123e1dc4f8f289d 100644
> --- a/net/sched/sch_cake.c
> +++ b/net/sched/sch_cake.c
> @@ -399,14 +399,14 @@ static void cake_configure_rates(struct Qdisc *sch, u64 rate, bool rate_adjust);
>   * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
>   */
>  
> -static void cobalt_newton_step(struct cobalt_vars *vars)
> +static void cobalt_newton_step(struct cobalt_vars *vars, u32 count)
>  {
>  	u32 invsqrt, invsqrt2;
>  	u64 val;
>  
>  	invsqrt = vars->rec_inv_sqrt;
>  	invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
> -	val = (3LL << 32) - ((u64)vars->count * invsqrt2);
> +	val = (3LL << 32) - ((u64)count * invsqrt2);
>  
>  	val >>= 2; /* avoid overflow in following multiply */
>  	val = (val * invsqrt) >> (32 - 2 + 1);
> @@ -414,12 +414,12 @@ static void cobalt_newton_step(struct cobalt_vars *vars)
>  	vars->rec_inv_sqrt = val;
>  }
>  
> -static void cobalt_invsqrt(struct cobalt_vars *vars)
> +static void cobalt_invsqrt(struct cobalt_vars *vars, u32 count)
>  {
> -	if (vars->count < REC_INV_SQRT_CACHE)
> -		vars->rec_inv_sqrt = inv_sqrt_cache[vars->count];
> +	if (count < REC_INV_SQRT_CACHE)
> +		vars->rec_inv_sqrt = inv_sqrt_cache[count];
>  	else
> -		cobalt_newton_step(vars);
> +		cobalt_newton_step(vars, count);
>  }
>  
>  static void cobalt_vars_init(struct cobalt_vars *vars)
> @@ -449,16 +449,19 @@ static bool cobalt_queue_full(struct cobalt_vars *vars,
>  	bool up = false;
>  
>  	if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
> -		up = !vars->p_drop;
> -		vars->p_drop += p->p_inc;
> -		if (vars->p_drop < p->p_inc)
> -			vars->p_drop = ~0;
> -		vars->blue_timer = now;
> -	}
> -	vars->dropping = true;
> -	vars->drop_next = now;
> +		u32 p_drop = vars->p_drop;
> +
> +		up = !p_drop;
> +		p_drop += p->p_inc;
> +		if (p_drop < p->p_inc)
> +			p_drop = ~0;
> +		WRITE_ONCE(vars->p_drop, p_drop);
> +		WRITE_ONCE(vars->blue_timer, now);
> +	}
> +	WRITE_ONCE(vars->dropping, true);
> +	WRITE_ONCE(vars->drop_next, now);
>  	if (!vars->count)
> -		vars->count = 1;
> +		WRITE_ONCE(vars->count, 1);
>  
>  	return up;
>  }
> @@ -474,21 +477,25 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
>  
>  	if (vars->p_drop &&
>  	    ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
> -		if (vars->p_drop < p->p_dec)
> -			vars->p_drop = 0;
> +		u32 p_drop = vars->p_drop;
> +
> +		if (p_drop < p->p_dec)
> +			p_drop = 0;
>  		else
> -			vars->p_drop -= p->p_dec;
> -		vars->blue_timer = now;
> -		down = !vars->p_drop;
> +			p_drop -= p->p_dec;
> +		WRITE_ONCE(vars->p_drop, p_drop);
> +		WRITE_ONCE(vars->blue_timer, now);
> +		down = !p_drop;
>  	}
> -	vars->dropping = false;
> +	WRITE_ONCE(vars->dropping, false);
>  
>  	if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
> -		vars->count--;
> -		cobalt_invsqrt(vars);
> -		vars->drop_next = cobalt_control(vars->drop_next,
> -						 p->interval,
> -						 vars->rec_inv_sqrt);
> +		WRITE_ONCE(vars->count, vars->count - 1);
> +		cobalt_invsqrt(vars, vars->count);
> +		WRITE_ONCE(vars->drop_next,
> +			   cobalt_control(vars->drop_next,
> +					  p->interval,
> +					  vars->rec_inv_sqrt));
>  	}
>  
>  	return down;
> @@ -507,6 +514,7 @@ static enum qdisc_drop_reason cobalt_should_drop(struct cobalt_vars *vars,
>  	bool next_due, over_target;
>  	ktime_t schedule;
>  	u64 sojourn;
> +	u32 count;
>  
>  /* The 'schedule' variable records, in its sign, whether 'now' is before or
>   * after 'drop_next'.  This allows 'drop_next' to be updated before the next
> @@ -528,45 +536,50 @@ static enum qdisc_drop_reason cobalt_should_drop(struct cobalt_vars *vars,
>  	over_target = sojourn > p->target &&
>  		      sojourn > p->mtu_time * bulk_flows * 2 &&
>  		      sojourn > p->mtu_time * 4;
> -	next_due = vars->count && ktime_to_ns(schedule) >= 0;
> +	count = vars->count;
> +	next_due = count && ktime_to_ns(schedule) >= 0;
>  
>  	vars->ecn_marked = false;
>  
>  	if (over_target) {
>  		if (!vars->dropping) {
> -			vars->dropping = true;
> -			vars->drop_next = cobalt_control(now,
> -							 p->interval,
> -							 vars->rec_inv_sqrt);
> +			WRITE_ONCE(vars->dropping, true);
> +			WRITE_ONCE(vars->drop_next,
> +				   cobalt_control(now,
> +						  p->interval,
> +						  vars->rec_inv_sqrt));
>  		}
> -		if (!vars->count)
> -			vars->count = 1;
> +		if (!count)
> +			count = 1;
>  	} else if (vars->dropping) {
> -		vars->dropping = false;
> +		WRITE_ONCE(vars->dropping, false);
>  	}
>  
>  	if (next_due && vars->dropping) {
>  		/* Use ECN mark if possible, otherwise drop */
> -		if (!(vars->ecn_marked = INET_ECN_set_ce(skb)))
> +		vars->ecn_marked = INET_ECN_set_ce(skb);
> +		if (!vars->ecn_marked)
>  			reason = QDISC_DROP_CONGESTED;
>  
> -		vars->count++;
> -		if (!vars->count)
> -			vars->count--;
> -		cobalt_invsqrt(vars);
> -		vars->drop_next = cobalt_control(vars->drop_next,
> -						 p->interval,
> -						 vars->rec_inv_sqrt);
> +		count++;
> +		if (!count)
> +			count--;
> +		cobalt_invsqrt(vars, count);
> +		WRITE_ONCE(vars->drop_next,
> +			   cobalt_control(vars->drop_next,
> +					  p->interval,
> +					  vars->rec_inv_sqrt));
>  		schedule = ktime_sub(now, vars->drop_next);
>  	} else {
>  		while (next_due) {
> -			vars->count--;
> -			cobalt_invsqrt(vars);
> -			vars->drop_next = cobalt_control(vars->drop_next,
> -							 p->interval,
> -							 vars->rec_inv_sqrt);
> +			count--;
> +			cobalt_invsqrt(vars, count);
> +			WRITE_ONCE(vars->drop_next,
> +				   cobalt_control(vars->drop_next,
> +						  p->interval,
> +						  vars->rec_inv_sqrt));
>  			schedule = ktime_sub(now, vars->drop_next);
> -			next_due = vars->count && ktime_to_ns(schedule) >= 0;
> +			next_due = count && ktime_to_ns(schedule) >= 0;
>  		}
>  	}
>  
> @@ -575,11 +588,12 @@ static enum qdisc_drop_reason cobalt_should_drop(struct cobalt_vars *vars,
>  	    get_random_u32() < vars->p_drop)
>  		reason = QDISC_DROP_FLOOD_PROTECTION;
>  
> +	WRITE_ONCE(vars->count, count);
>  	/* Overload the drop_next field as an activity timeout */
> -	if (!vars->count)
> -		vars->drop_next = ktime_add_ns(now, p->interval);
> +	if (count)

This seems to reverse the conditional? The original set drop_next when
!vars->count, but the new code takes that branch when count is non-zero.

> +		WRITE_ONCE(vars->drop_next, ktime_add_ns(now, p->interval));
>  	else if (ktime_to_ns(schedule) > 0 && reason == QDISC_DROP_UNSPEC)
> -		vars->drop_next = now;
> +		WRITE_ONCE(vars->drop_next, now);
>  
>  	return reason;
>  }
> @@ -813,7 +827,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
>  		     i++, k = (k + 1) % CAKE_SET_WAYS) {
>  			if (q->tags[outer_hash + k] == flow_hash) {
>  				if (i)
> -					q->way_hits++;
> +					WRITE_ONCE(q->way_hits, q->way_hits + 1);
>  
>  				if (!q->flows[outer_hash + k].set) {
>  					/* need to increment host refcnts */
> @@ -831,7 +845,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
>  		for (i = 0; i < CAKE_SET_WAYS;
>  			 i++, k = (k + 1) % CAKE_SET_WAYS) {
>  			if (!q->flows[outer_hash + k].set) {
> -				q->way_misses++;
> +				WRITE_ONCE(q->way_misses, q->way_misses + 1);
>  				allocate_src = cake_dsrc(flow_mode);
>  				allocate_dst = cake_ddst(flow_mode);
>  				goto found;
> @@ -841,7 +855,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
>  		/* With no empty queues, default to the original
>  		 * queue, accept the collision, update the host tags.
>  		 */
> -		q->way_collisions++;
> +		WRITE_ONCE(q->way_collisions, q->way_collisions + 1);
>  		allocate_src = cake_dsrc(flow_mode);
>  		allocate_dst = cake_ddst(flow_mode);
>  
> @@ -875,7 +889,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
>  			q->flows[reduced_hash].srchost = srchost_idx;
>  
>  			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
> -				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
> +				cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash],
> +								 flow_mode);
>  		}
>  
>  		if (allocate_dst) {
> @@ -899,7 +914,8 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
>  			q->flows[reduced_hash].dsthost = dsthost_idx;
>  
>  			if (q->flows[reduced_hash].set == CAKE_SET_BULK)
> -				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
> +				cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash],
> +								 flow_mode);
>  		}
>  	}
>  
> @@ -1379,9 +1395,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
>  		len -= off;
>  
>  	if (qd->max_netlen < len)
> -		qd->max_netlen = len;
> +		WRITE_ONCE(qd->max_netlen, len);
>  	if (qd->min_netlen > len)
> -		qd->min_netlen = len;
> +		WRITE_ONCE(qd->min_netlen, len);
>  
>  	len += q->rate_overhead;
>  
> @@ -1401,9 +1417,9 @@ static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off)
>  	}
>  
>  	if (qd->max_adjlen < len)
> -		qd->max_adjlen = len;
> +		WRITE_ONCE(qd->max_adjlen, len);
>  	if (qd->min_adjlen > len)
> -		qd->min_adjlen = len;
> +		WRITE_ONCE(qd->min_adjlen, len);
>  
>  	return len;
>  }
> @@ -1416,7 +1432,7 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
>  	u16 segs = qdisc_pkt_segs(skb);
>  	u32 len = qdisc_pkt_len(skb);
>  
> -	q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
> +	WRITE_ONCE(q->avg_netoff, cake_ewma(q->avg_netoff, off << 16, 8));
>  
>  	if (segs == 1)
>  		return cake_calc_overhead(q, len, off);
> @@ -1590,16 +1606,17 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
>  	}
>  
>  	if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
> -		b->unresponsive_flow_count++;
> +		WRITE_ONCE(b->unresponsive_flow_count,
> +			   b->unresponsive_flow_count + 1);
>  
>  	len = qdisc_pkt_len(skb);
>  	q->buffer_used      -= skb->truesize;
> -	b->backlogs[idx]    -= len;
> -	b->tin_backlog      -= len;
> +	WRITE_ONCE(b->backlogs[idx], b->backlogs[idx] - len);
> +	WRITE_ONCE(b->tin_backlog, b->tin_backlog - len);
>  	qstats_backlog_sub(sch, len);
>  
> -	flow->dropped++;
> -	b->tin_dropped++;
> +	WRITE_ONCE(flow->dropped, flow->dropped + 1);
> +	WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1);
>  
>  	if (q->config->rate_flags & CAKE_FLAG_INGRESS)
>  		cake_advance_shaper(q, b, skb, now, true);
> @@ -1795,7 +1812,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>  	}
>  
>  	if (unlikely(len > b->max_skblen))
> -		b->max_skblen = len;
> +		WRITE_ONCE(b->max_skblen, len);
>  
>  	if (qdisc_pkt_segs(skb) > 1 && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) {
>  		struct sk_buff *segs, *nskb;
> @@ -1819,13 +1836,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>  			numsegs++;
>  			slen += segs->len;
>  			q->buffer_used += segs->truesize;
> -			b->packets++;

Right above this hunk we do sch->q.qlen++; - does that need changing as
well?

>  		}
>  
>  		/* stats */
> -		b->bytes	    += slen;
> -		b->backlogs[idx]    += slen;
> -		b->tin_backlog      += slen;
> +		WRITE_ONCE(b->bytes, b->bytes + slen);
> +		WRITE_ONCE(b->packets, b->packets + numsegs);
> +		WRITE_ONCE(b->backlogs[idx], b->backlogs[idx] + slen);
> +		WRITE_ONCE(b->tin_backlog, b->tin_backlog + slen);
>  		qstats_backlog_add(sch, slen);
>  		q->avg_window_bytes += slen;
>  
> @@ -1843,10 +1860,10 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>  			ack = cake_ack_filter(q, flow);
>  
>  		if (ack) {
> -			b->ack_drops++;
> +			WRITE_ONCE(b->ack_drops, b->ack_drops + 1);
>  			qdisc_qstats_drop(sch);
>  			ack_pkt_len = qdisc_pkt_len(ack);
> -			b->bytes += ack_pkt_len;
> +			WRITE_ONCE(b->bytes, b->bytes + ack_pkt_len);
>  			q->buffer_used += skb->truesize - ack->truesize;
>  			if (q->config->rate_flags & CAKE_FLAG_INGRESS)
>  				cake_advance_shaper(q, b, ack, now, true);
> @@ -1859,10 +1876,10 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>  		}
>  
>  		/* stats */
> -		b->packets++;
> -		b->bytes	    += len - ack_pkt_len;
> -		b->backlogs[idx]    += len - ack_pkt_len;
> -		b->tin_backlog      += len - ack_pkt_len;
> +		WRITE_ONCE(b->packets, b->packets + 1);
> +		WRITE_ONCE(b->bytes, b->bytes + len - ack_pkt_len);
> +		WRITE_ONCE(b->backlogs[idx], b->backlogs[idx] + len - ack_pkt_len);
> +		WRITE_ONCE(b->tin_backlog, b->tin_backlog + len - ack_pkt_len);
>  		qstats_backlog_add(sch, len - ack_pkt_len);
>  		q->avg_window_bytes += len - ack_pkt_len;
>  	}
> @@ -1894,9 +1911,9 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>  			u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
>  
>  			b = div64_u64(b, window_interval);
> -			q->avg_peak_bandwidth =
> -				cake_ewma(q->avg_peak_bandwidth, b,
> -					  b > q->avg_peak_bandwidth ? 2 : 8);
> +			WRITE_ONCE(q->avg_peak_bandwidth,
> +				   cake_ewma(q->avg_peak_bandwidth, b,
> +					     b > q->avg_peak_bandwidth ? 2 : 8));
>  			q->avg_window_bytes = 0;
>  			q->avg_window_begin = now;
>  
> @@ -1917,27 +1934,30 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>  		if (!flow->set) {
>  			list_add_tail(&flow->flowchain, &b->new_flows);
>  		} else {
> -			b->decaying_flow_count--;
> +			WRITE_ONCE(b->decaying_flow_count,
> +				   b->decaying_flow_count - 1);
>  			list_move_tail(&flow->flowchain, &b->new_flows);
>  		}
>  		flow->set = CAKE_SET_SPARSE;
> -		b->sparse_flow_count++;
> +		WRITE_ONCE(b->sparse_flow_count,
> +			   b->sparse_flow_count + 1);
>  
> -		flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode);
> +		WRITE_ONCE(flow->deficit,
> +			   cake_get_flow_quantum(b, flow, q->config->flow_mode));
>  	} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
>  		/* this flow was empty, accounted as a sparse flow, but actually
>  		 * in the bulk rotation.
>  		 */
>  		flow->set = CAKE_SET_BULK;
> -		b->sparse_flow_count--;
> -		b->bulk_flow_count++;
> +		WRITE_ONCE(b->sparse_flow_count, b->sparse_flow_count - 1);
> +		WRITE_ONCE(b->bulk_flow_count, b->bulk_flow_count + 1);
>  
>  		cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
>  		cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
>  	}
>  
>  	if (q->buffer_used > q->buffer_max_used)
> -		q->buffer_max_used = q->buffer_used;
> +		WRITE_ONCE(q->buffer_max_used, q->buffer_used);
>  
>  	if (q->buffer_used <= q->buffer_limit)
>  		return NET_XMIT_SUCCESS;
> @@ -1976,8 +1996,8 @@ static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
>  	if (flow->head) {
>  		skb = dequeue_head(flow);
>  		len = qdisc_pkt_len(skb);
> -		b->backlogs[q->cur_flow] -= len;
> -		b->tin_backlog		 -= len;
> +		WRITE_ONCE(b->backlogs[q->cur_flow], b->backlogs[q->cur_flow] - len);
> +		WRITE_ONCE(b->tin_backlog, b->tin_backlog - len);
>  		qstats_backlog_sub(sch, len);
>  		q->buffer_used		 -= skb->truesize;
>  		qdisc_qlen_dec(sch);
> @@ -2042,7 +2062,7 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
>  
>  		cake_configure_rates(sch, new_rate, true);
>  		q->last_checked_active = now;
> -		q->active_queues = num_active_qs;
> +		WRITE_ONCE(q->active_queues, num_active_qs);
>  	}
>  
>  begin:
> @@ -2149,8 +2169,10 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
>  		 */
>  		if (flow->set == CAKE_SET_SPARSE) {
>  			if (flow->head) {
> -				b->sparse_flow_count--;
> -				b->bulk_flow_count++;
> +				WRITE_ONCE(b->sparse_flow_count,
> +					   b->sparse_flow_count - 1);
> +				WRITE_ONCE(b->bulk_flow_count,
> +					   b->bulk_flow_count + 1);
>  
>  				cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
>  				cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
> @@ -2165,7 +2187,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
>  			}
>  		}
>  
> -		flow->deficit += cake_get_flow_quantum(b, flow, q->config->flow_mode);
> +		WRITE_ONCE(flow->deficit,
> +			   flow->deficit + cake_get_flow_quantum(b, flow, q->config->flow_mode));
>  		list_move_tail(&flow->flowchain, &b->old_flows);
>  
>  		goto retry;
> @@ -2177,7 +2200,8 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
>  		if (!skb) {
>  			/* this queue was actually empty */
>  			if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
> -				b->unresponsive_flow_count--;
> +				WRITE_ONCE(b->unresponsive_flow_count,
> +					   b->unresponsive_flow_count - 1);
>  
>  			if (flow->cvars.p_drop || flow->cvars.count ||
>  			    ktime_before(now, flow->cvars.drop_next)) {
> @@ -2187,16 +2211,22 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
>  				list_move_tail(&flow->flowchain,
>  					       &b->decaying_flows);
>  				if (flow->set == CAKE_SET_BULK) {
> -					b->bulk_flow_count--;
> +					WRITE_ONCE(b->bulk_flow_count,
> +						   b->bulk_flow_count - 1);
>  
> -					cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
> -					cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
> +					cake_dec_srchost_bulk_flow_count(b, flow,
> +									 q->config->flow_mode);
> +					cake_dec_dsthost_bulk_flow_count(b, flow,
> +									 q->config->flow_mode);

These seem like unnecessary whitespace changes?

>  
> -					b->decaying_flow_count++;
> +					WRITE_ONCE(b->decaying_flow_count,
> +						   b->decaying_flow_count + 1);
>  				} else if (flow->set == CAKE_SET_SPARSE ||
>  					   flow->set == CAKE_SET_SPARSE_WAIT) {
> -					b->sparse_flow_count--;
> -					b->decaying_flow_count++;
> +					WRITE_ONCE(b->sparse_flow_count,
> +						   b->sparse_flow_count - 1);
> +					WRITE_ONCE(b->decaying_flow_count,
> +						   b->decaying_flow_count + 1);
>  				}
>  				flow->set = CAKE_SET_DECAYING;
>  			} else {
> @@ -2204,14 +2234,20 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
>  				list_del_init(&flow->flowchain);
>  				if (flow->set == CAKE_SET_SPARSE ||
>  				    flow->set == CAKE_SET_SPARSE_WAIT)
> -					b->sparse_flow_count--;
> +					WRITE_ONCE(b->sparse_flow_count,
> +						   b->sparse_flow_count - 1);
>  				else if (flow->set == CAKE_SET_BULK) {
> -					b->bulk_flow_count--;
> +					WRITE_ONCE(b->bulk_flow_count,
> +						   b->bulk_flow_count - 1);
>  
> -					cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
> -					cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);

Same here?

-Toke


  reply	other threads:[~2026-04-13 12:07 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-10 18:22 [PATCH v3 net-next 00/15] net/sched: prepare RTNL removal from qdisc dumps Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 01/15] net/sched: rename qstats_overlimit_inc() to qstats_cpu_overlimit_inc() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 02/15] net/sched: add qstats_cpu_drop_inc() helper Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 03/15] net/sched: add READ_ONCE() in gnet_stats_add_queue[_cpu] Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 04/15] net/sched: add qdisc_qlen_inc() and qdisc_qlen_dec() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 05/15] net/sched: annotate data-races around sch->qstats.backlog Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 06/15] net/sched: sch_sfb: annotate data-races in sfb_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 07/15] net/sched: sch_red: annotate data-races in red_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 08/15] net/sched: sch_fq_codel: remove data-races from fq_codel_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 09/15] net/sched: sch_pie: annotate data-races in pie_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 10/15] net/sched: sch_fq_pie: annotate data-races in fq_pie_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 11/15] net_sched: sch_hhf: annotate data-races in hhf_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 12/15] net/sched: sch_choke: annotate data-races in choke_dump_stats() Eric Dumazet
2026-04-10 18:22 ` [PATCH v3 net-next 13/15] net/sched: sch_cake: annotate data-races in cake_dump_stats() Eric Dumazet
2026-04-13 12:07   ` Toke Høiland-Jørgensen [this message]
2026-04-13 13:11     ` Eric Dumazet
2026-04-13 14:23       ` Toke Høiland-Jørgensen
2026-04-10 18:22 ` [PATCH v3 net-next 14/15] net/sched: mq: no longer acquire qdisc spinlocks in dump operations Eric Dumazet
2026-04-13 13:16 ` [PATCH v3 net-next 00/15] net/sched: prepare RTNL removal from qdisc dumps Eric Dumazet

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=87se8zcbcy.fsf@toke.dk \
    --to=toke@redhat.com \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=eric.dumazet@gmail.com \
    --cc=horms@kernel.org \
    --cc=jhs@mojatatu.com \
    --cc=jiri@resnulli.us \
    --cc=kuba@kernel.org \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox