* [PATCH] net_sched: accurate bytes/packets stats/rates
From: Eric Dumazet @ 2011-01-14 16:16 UTC
To: David Miller
Cc: netdev, Patrick McHardy, Stephen Hemminger, jamal,
Jarek Poplawski
In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
a problem with pfifo_head drops that incorrectly decreased
sch->bstats.bytes and sch->bstats.packets.
Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
previously enqueued packet, and bstats cannot be adjusted afterwards, so
bstats/rates are not accurate (overestimated).
This patch changes the qdisc_bstats updates to be done at dequeue() time
instead of enqueue() time. bstats counters no longer account for dropped
frames, and rates are more correct, since enqueue() bursts don't affect
the dequeue() rate.
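Every qdisc is converted the same way: the qdisc_bstats_update() call is
dropped from the enqueue() path and added on the successful dequeue()
path. As a minimal sketch of the resulting dequeue pattern (illustrative
only, not a hunk from this patch):

	static struct sk_buff *example_dequeue(struct Qdisc *sch)
	{
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		if (skb) {
			/* account bytes/packets only for packets
			 * actually leaving the qdisc */
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
		}
		return skb;
	}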
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Patrick McHardy <kaber@trash.net>
CC: Stephen Hemminger <shemminger@vyatta.com>
CC: Jarek Poplawski <jarkao2@gmail.com>
CC: jamal <hadi@cyberus.ca>
---
include/net/sch_generic.h | 14 ++++++++------
net/sched/sch_cbq.c | 3 +--
net/sched/sch_drr.c | 2 +-
net/sched/sch_dsmark.c | 2 +-
net/sched/sch_fifo.c | 2 +-
net/sched/sch_generic.c | 2 +-
net/sched/sch_hfsc.c | 2 +-
net/sched/sch_htb.c | 12 +++++-------
net/sched/sch_multiq.c | 2 +-
net/sched/sch_netem.c | 3 +--
net/sched/sch_prio.c | 2 +-
net/sched/sch_red.c | 11 ++++++-----
net/sched/sch_sfq.c | 5 ++---
net/sched/sch_tbf.c | 2 +-
net/sched/sch_teql.c | 3 ++-
15 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e9eee99..8fed414 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -445,7 +445,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -456,25 +455,28 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
}
static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
- struct sk_buff_head *list)
+ struct sk_buff_head *list,
+ bool stats_update)
{
struct sk_buff *skb = __skb_dequeue(list);
- if (likely(skb != NULL))
+ if (likely(skb != NULL)) {
+ if (stats_update)
+ qdisc_bstats_update(sch, skb);
sch->qstats.backlog -= qdisc_pkt_len(skb);
-
+ }
return skb;
}
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
- return __qdisc_dequeue_head(sch, &sch->q);
+ return __qdisc_dequeue_head(sch, &sch->q, true);
}
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct sk_buff_head *list)
{
- struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+ struct sk_buff *skb = __qdisc_dequeue_head(sch, list, false);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c2..5f63ec5 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e64..6b7fe4a 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 60f4bdd..0f7bf3f 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
}
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
if (skb == NULL)
return NULL;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d633..f2557e0 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -53,7 +53,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */
- skb_head = qdisc_dequeue_head(sch);
+ skb_head = __qdisc_dequeue_head(sch, &sch->q, false);
sch->qstats.drops++;
kfree_skb(skb_head);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598..eeb6343 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -467,7 +467,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
if (likely(band >= 0)) {
struct sk_buff_head *list = band2list(priv, band);
- struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+ struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list, true);
qdisc->q.qlen--;
if (skb_queue_empty(list))
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791..14a799d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
set_active(cl, qdisc_pkt_len(skb));
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
}
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0..fc12fe6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -842,7 +841,7 @@ next:
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
+ok:
+ qdisc_bstats_update(sch, skb);
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL)) {
- sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
- goto fin;
- }
+ if (likely(skb != NULL))
+ goto ok;
}
}
sch->qstats.overlimits++;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 21f13da..436a2e7 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1c4bce8..6a3006b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 966158d..fbd710d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5..9f98dbd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
struct Qdisc *child = q->qdisc;
skb = child->dequeue(child);
- if (skb)
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- else if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
-
+ } else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
return skb;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 239ec53..edea8ce 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tail = slot;
slot->allot = q->scaled_quantum;
}
- if (++sch->q.qlen <= q->limit) {
- qdisc_bstats_update(sch, skb);
+ if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
- }
sfq_drop(sch);
return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
}
skb = slot_dequeue_head(slot);
sfq_dec(q, a);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77565e7..e931658 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->ptokens = ptoks;
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
return skb;
}
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index af9360d..2ecd9af 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,7 +83,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -107,6 +106,8 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch;
netif_wake_queue(m);
}
+ } else {
+ qdisc_bstats_update(sch, skb);
}
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Stephen Hemminger @ 2011-01-14 17:52 UTC
To: Eric Dumazet
Cc: David Miller, netdev, Patrick McHardy, jamal, Jarek Poplawski
By using __qdisc_queue_drop_head() in sch_fifo.c, the stats_update parameter won't be
needed.
From Eric Dumazet <eric.dumazet@gmail.com>
In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
a problem with pfifo_head drops that incorrectly decreased
sch->bstats.bytes and sch->bstats.packets.
Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
previously enqueued packet, and bstats cannot be adjusted afterwards, so
bstats/rates are not accurate (overestimated).
This patch changes the qdisc_bstats updates to be done at dequeue() time
instead of enqueue() time. bstats counters no longer account for dropped
frames, and rates are more correct, since enqueue() bursts don't affect
the dequeue() rate.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
CC: Patrick McHardy <kaber@trash.net>
CC: Jarek Poplawski <jarkao2@gmail.com>
CC: jamal <hadi@cyberus.ca>
---
include/net/sch_generic.h | 8 +++++---
net/sched/sch_cbq.c | 3 +--
net/sched/sch_drr.c | 2 +-
net/sched/sch_dsmark.c | 2 +-
net/sched/sch_fifo.c | 6 +-----
net/sched/sch_hfsc.c | 2 +-
net/sched/sch_htb.c | 12 +++++-------
net/sched/sch_multiq.c | 2 +-
net/sched/sch_netem.c | 3 +--
net/sched/sch_prio.c | 2 +-
net/sched/sch_red.c | 11 ++++++-----
net/sched/sch_sfq.c | 5 ++---
net/sched/sch_tbf.c | 2 +-
net/sched/sch_teql.c | 3 ++-
14 files changed, 29 insertions(+), 34 deletions(-)
--- a/include/net/sch_generic.h 2011-01-14 09:19:00.730849868 -0800
+++ b/include/net/sch_generic.h 2011-01-14 09:47:59.058551676 -0800
@@ -445,7 +445,6 @@ static inline int __qdisc_enqueue_tail(s
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -460,8 +459,10 @@ static inline struct sk_buff *__qdisc_de
{
struct sk_buff *skb = __skb_dequeue(list);
- if (likely(skb != NULL))
+ if (likely(skb != NULL)) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+ }
return skb;
}
@@ -474,10 +475,11 @@ static inline struct sk_buff *qdisc_dequ
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct sk_buff_head *list)
{
- struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+ struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
+ sch->qstats.backlog -= len;
kfree_skb(skb);
return len;
}
--- a/net/sched/sch_cbq.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_cbq.c 2011-01-14 09:28:20.398631228 -0800
@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_bu
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
--- a/net/sched/sch_drr.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_drr.c 2011-01-14 09:28:20.398631228 -0800
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *s
}
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struc
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
--- a/net/sched/sch_dsmark.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_dsmark.c 2011-01-14 09:28:20.398631228 -0800
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff
return err;
}
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(st
if (skb == NULL)
return NULL;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
--- a/net/sched/sch_fifo.c 2011-01-09 09:34:32.690685246 -0800
+++ b/net/sched/sch_fifo.c 2011-01-14 09:50:37.082839684 -0800
@@ -46,17 +46,13 @@ static int pfifo_enqueue(struct sk_buff
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch);
if (likely(skb_queue_len(&sch->q) < q->limit))
return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */
- skb_head = qdisc_dequeue_head(sch);
- sch->qstats.drops++;
- kfree_skb(skb_head);
-
+ __qdisc_queue_drop_head(sch, &sch->q);
qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN;
--- a/net/sched/sch_hfsc.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_hfsc.c 2011-01-14 09:28:20.428633918 -0800
@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct
set_active(cl, qdisc_pkt_len(skb));
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
}
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
--- a/net/sched/sch_htb.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_htb.c 2011-01-14 09:28:20.438634799 -0800
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *s
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -842,7 +841,7 @@ next:
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struc
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
+ok:
+ qdisc_bstats_update(sch, skb);
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struc
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL)) {
- sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
- goto fin;
- }
+ if (likely(skb != NULL))
+ goto ok;
}
}
sch->qstats.overlimits++;
--- a/net/sched/sch_multiq.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_multiq.c 2011-01-14 09:28:20.438634799 -0800
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, stru
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(st
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
--- a/net/sched/sch_netem.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_netem.c 2011-01-14 09:28:20.438634799 -0800
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(str
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS;
}
--- a/net/sched/sch_prio.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_prio.c 2011-01-14 09:28:20.438634799 -0800
@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(stru
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
--- a/net/sched/sch_red.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_red.c 2011-01-14 09:28:20.438634799 -0800
@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *s
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(stru
struct Qdisc *child = q->qdisc;
skb = child->dequeue(child);
- if (skb)
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- else if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
-
+ } else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
return skb;
}
--- a/net/sched/sch_sfq.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_sfq.c 2011-01-14 09:28:20.438634799 -0800
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct
q->tail = slot;
slot->allot = q->scaled_quantum;
}
- if (++sch->q.qlen <= q->limit) {
- qdisc_bstats_update(sch, skb);
+ if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
- }
sfq_drop(sch);
return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
}
skb = slot_dequeue_head(slot);
sfq_dec(q, a);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
--- a/net/sched/sch_tbf.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_tbf.c 2011-01-14 09:28:20.438634799 -0800
@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *s
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struc
q->ptokens = ptoks;
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
return skb;
}
--- a/net/sched/sch_teql.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_teql.c 2011-01-14 09:28:20.438634799 -0800
@@ -83,7 +83,6 @@ teql_enqueue(struct sk_buff *skb, struct
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -107,6 +106,8 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch;
netif_wake_queue(m);
}
+ } else {
+ qdisc_bstats_update(sch, skb);
}
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Eric Dumazet @ 2011-01-14 18:08 UTC
To: Stephen Hemminger
Cc: David Miller, netdev, Patrick McHardy, jamal, Jarek Poplawski
On Friday 14 January 2011 at 09:52 -0800, Stephen Hemminger wrote:
> By using __qdisc_queue_drop_head() in sch_fifo.c, the stats_update parameter won't be
> needed.
Hmm, this is a constant parameter to an inline function, so it is removed by the
compiler.
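(Roughly, with hypothetical names — a sketch of the v1 helper, not the
actual header code — the branch is folded away at each call site, since
stats_update is a compile-time constant there:)

	static inline struct sk_buff *deq(struct Qdisc *sch, bool stats_update)
	{
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		if (skb && stats_update)	/* constant-folded per call site */
			qdisc_bstats_update(sch, skb);
		return skb;
	}

	/* deq(sch, false) compiles as if the update call were not there */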
And you add a bug in pfifo_tail_enqueue(): it now lacks the
sch->qstats.drops++;
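(For reference, the corrected path keeps the drop counter, as in the
revised hunk later in this thread:)

	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q);
	sch->qstats.drops++;
	qdisc_enqueue_tail(skb, sch);
	return NET_XMIT_CN;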
>
> From Eric Dumazet <eric.dumazet@gmail.com>
>
Note: this line should be the first one in the mail ;)
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Stephen Hemminger @ 2011-01-14 19:03 UTC
To: Eric Dumazet
Cc: David Miller, netdev, Patrick McHardy, jamal, Jarek Poplawski
From Eric Dumazet <eric.dumazet@gmail.com>
In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
a problem with pfifo_head drops that incorrectly decreased
sch->bstats.bytes and sch->bstats.packets.
Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
previously enqueued packet, and bstats cannot be adjusted afterwards, so
bstats/rates are not accurate (overestimated).
This patch changes the qdisc_bstats updates to be done at dequeue() time
instead of enqueue() time. bstats counters no longer account for dropped
frames, and rates are more correct, since enqueue() bursts don't affect
the dequeue() rate.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
CC: Patrick McHardy <kaber@trash.net>
CC: Jarek Poplawski <jarkao2@gmail.com>
CC: jamal <hadi@cyberus.ca>
---
sch_fifo is now changed to use __qdisc_queue_drop_head(), which
keeps correct statistics and is actually clearer.
include/net/sch_generic.h | 8 +++++---
net/sched/sch_cbq.c | 3 +--
net/sched/sch_drr.c | 2 +-
net/sched/sch_dsmark.c | 2 +-
net/sched/sch_fifo.c | 6 +-----
net/sched/sch_hfsc.c | 2 +-
net/sched/sch_htb.c | 12 +++++-------
net/sched/sch_multiq.c | 2 +-
net/sched/sch_netem.c | 3 +--
net/sched/sch_prio.c | 2 +-
net/sched/sch_red.c | 11 ++++++-----
net/sched/sch_sfq.c | 5 ++---
net/sched/sch_tbf.c | 2 +-
net/sched/sch_teql.c | 3 ++-
14 files changed, 29 insertions(+), 34 deletions(-)
--- a/include/net/sch_generic.h 2011-01-14 09:19:00.730849868 -0800
+++ b/include/net/sch_generic.h 2011-01-14 09:47:59.058551676 -0800
@@ -445,7 +445,6 @@ static inline int __qdisc_enqueue_tail(s
{
__skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -460,8 +459,10 @@ static inline struct sk_buff *__qdisc_de
{
struct sk_buff *skb = __skb_dequeue(list);
- if (likely(skb != NULL))
+ if (likely(skb != NULL)) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+ }
return skb;
}
@@ -474,10 +475,11 @@ static inline struct sk_buff *qdisc_dequ
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct sk_buff_head *list)
{
- struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+ struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
+ sch->qstats.backlog -= len;
kfree_skb(skb);
return len;
}
--- a/net/sched/sch_cbq.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_cbq.c 2011-01-14 09:28:20.398631228 -0800
@@ -390,7 +390,6 @@ cbq_enqueue(struct sk_buff *skb, struct
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
@@ -649,7 +648,6 @@ static int cbq_reshape_fail(struct sk_bu
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
@@ -971,6 +969,7 @@ cbq_dequeue(struct Qdisc *sch)
skb = cbq_dequeue_1(sch);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
--- a/net/sched/sch_drr.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_drr.c 2011-01-14 09:28:20.398631228 -0800
@@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *s
}
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return err;
@@ -403,6 +402,7 @@ static struct sk_buff *drr_dequeue(struc
skb = qdisc_dequeue_peeked(cl->qdisc);
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
--- a/net/sched/sch_dsmark.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_dsmark.c 2011-01-14 09:28:20.398631228 -0800
@@ -260,7 +260,6 @@ static int dsmark_enqueue(struct sk_buff
return err;
}
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@ static struct sk_buff *dsmark_dequeue(st
if (skb == NULL)
return NULL;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
--- a/net/sched/sch_fifo.c 2011-01-09 09:34:32.690685246 -0800
+++ b/net/sched/sch_fifo.c 2011-01-14 10:43:39.534246186 -0800
@@ -46,17 +46,14 @@ static int pfifo_enqueue(struct sk_buff
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
- struct sk_buff *skb_head;
struct fifo_sched_data *q = qdisc_priv(sch);
if (likely(skb_queue_len(&sch->q) < q->limit))
return qdisc_enqueue_tail(skb, sch);
/* queue full, remove one skb to fulfill the limit */
- skb_head = qdisc_dequeue_head(sch);
+ __qdisc_queue_drop_head(sch, &sch->q);
sch->qstats.drops++;
- kfree_skb(skb_head);
-
qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN;
--- a/net/sched/sch_hfsc.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_hfsc.c 2011-01-14 09:28:20.428633918 -0800
@@ -1600,7 +1600,6 @@ hfsc_enqueue(struct sk_buff *skb, struct
set_active(cl, qdisc_pkt_len(skb));
bstats_update(&cl->bstats, skb);
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@@ -1666,6 +1665,7 @@ hfsc_dequeue(struct Qdisc *sch)
}
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
--- a/net/sched/sch_htb.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_htb.c 2011-01-14 09:28:20.438634799 -0800
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *s
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -842,7 +841,7 @@ next:
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struc
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
if (skb != NULL) {
+ok:
+ qdisc_bstats_update(sch, skb);
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struc
int prio = ffz(m);
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
- if (likely(skb != NULL)) {
- sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
- goto fin;
- }
+ if (likely(skb != NULL))
+ goto ok;
}
}
sch->qstats.overlimits++;
--- a/net/sched/sch_multiq.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_multiq.c 2011-01-14 09:28:20.438634799 -0800
@@ -83,7 +83,6 @@ multiq_enqueue(struct sk_buff *skb, stru
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -112,6 +111,7 @@ static struct sk_buff *multiq_dequeue(st
qdisc = q->queues[q->curband];
skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
--- a/net/sched/sch_netem.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_netem.c 2011-01-14 09:28:20.438634799 -0800
@@ -240,7 +240,6 @@ static int netem_enqueue(struct sk_buff
if (likely(ret == NET_XMIT_SUCCESS)) {
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
} else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
}
@@ -289,6 +288,7 @@ static struct sk_buff *netem_dequeue(str
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
@@ -476,7 +476,6 @@ static int tfifo_enqueue(struct sk_buff
__skb_queue_after(list, skb, nskb);
sch->qstats.backlog += qdisc_pkt_len(nskb);
- qdisc_bstats_update(sch, nskb);
return NET_XMIT_SUCCESS;
}
--- a/net/sched/sch_prio.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_prio.c 2011-01-14 09:28:20.438634799 -0800
@@ -84,7 +84,6 @@ prio_enqueue(struct sk_buff *skb, struct
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@@ -116,6 +115,7 @@ static struct sk_buff *prio_dequeue(stru
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
return skb;
}
--- a/net/sched/sch_red.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_red.c 2011-01-14 09:28:20.438634799 -0800
@@ -94,7 +94,6 @@ static int red_enqueue(struct sk_buff *s
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_bstats_update(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -114,11 +113,13 @@ static struct sk_buff * red_dequeue(stru
struct Qdisc *child = q->qdisc;
skb = child->dequeue(child);
- if (skb)
+ if (skb) {
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- else if (!red_is_idling(&q->parms))
- red_start_of_idle_period(&q->parms);
-
+ } else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
return skb;
}
--- a/net/sched/sch_sfq.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_sfq.c 2011-01-14 09:28:20.438634799 -0800
@@ -402,10 +402,8 @@ sfq_enqueue(struct sk_buff *skb, struct
q->tail = slot;
slot->allot = q->scaled_quantum;
}
- if (++sch->q.qlen <= q->limit) {
- qdisc_bstats_update(sch, skb);
+ if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
- }
sfq_drop(sch);
return NET_XMIT_CN;
@@ -445,6 +443,7 @@ next_slot:
}
skb = slot_dequeue_head(slot);
sfq_dec(q, a);
+ qdisc_bstats_update(sch, skb);
sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb);
--- a/net/sched/sch_tbf.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_tbf.c 2011-01-14 09:28:20.438634799 -0800
@@ -134,7 +134,6 @@ static int tbf_enqueue(struct sk_buff *s
}
sch->q.qlen++;
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -187,6 +186,7 @@ static struct sk_buff *tbf_dequeue(struc
q->ptokens = ptoks;
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_bstats_update(sch, skb);
return skb;
}
--- a/net/sched/sch_teql.c 2011-01-14 09:19:00.830857886 -0800
+++ b/net/sched/sch_teql.c 2011-01-14 09:28:20.438634799 -0800
@@ -83,7 +83,6 @@ teql_enqueue(struct sk_buff *skb, struct
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
- qdisc_bstats_update(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -107,6 +106,8 @@ teql_dequeue(struct Qdisc* sch)
dat->m->slaves = sch;
netif_wake_queue(m);
}
+ } else {
+ qdisc_bstats_update(sch, skb);
}
sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
return skb;
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Eric Dumazet @ 2011-01-14 19:21 UTC
To: Stephen Hemminger
Cc: David Miller, netdev, Patrick McHardy, jamal, Jarek Poplawski
On Friday 14 January 2011 at 11:03 -0800, Stephen Hemminger wrote:
> From Eric Dumazet <eric.dumazet@gmail.com>
>
> In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
> a problem with pfifo_head drops that incorrectly decreased
> sch->bstats.bytes and sch->bstats.packets.
>
> Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
> previously enqueued packet, and bstats cannot be adjusted afterwards, so
> bstats/rates are not accurate (overestimated).
>
> This patch changes the qdisc_bstats updates to be done at dequeue() time
> instead of enqueue() time. bstats counters no longer account for dropped
> frames, and rates are more correct, since enqueue() bursts don't affect
> the dequeue() rate.
>
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> Acked-by: Stephen Hemminger <shemminger@vyatta.com>
>
> CC: Patrick McHardy <kaber@trash.net>
> CC: Jarek Poplawski <jarkao2@gmail.com>
> CC: jamal <hadi@cyberus.ca>
> ---
> sch_fifo is now changed to use __qdisc_queue_drop_head(), which
> keeps correct statistics and is actually clearer.
>
>
Thanks for doing this, Stephen; this version seems fine.
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Jarek Poplawski @ 2011-01-16 22:35 UTC
To: Stephen Hemminger
Cc: Eric Dumazet, David Miller, netdev, Patrick McHardy, jamal
On Fri, Jan 14, 2011 at 11:03:42AM -0800, Stephen Hemminger wrote:
> From Eric Dumazet <eric.dumazet@gmail.com>
>
> In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
> a problem with pfifo_head drops that incorrectly decreased
> sch->bstats.bytes and sch->bstats.packets.
>
> Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
> previously enqueued packet, and bstats cannot be adjusted afterwards, so
> bstats/rates are not accurate (overestimated).
>
> This patch changes the qdisc_bstats updates to be done at dequeue() time
> instead of enqueue() time. bstats counters no longer account for dropped
> frames, and rates are more correct, since enqueue() bursts don't affect
> the dequeue() rate.
>
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> Acked-by: Stephen Hemminger <shemminger@vyatta.com>
>
> CC: Patrick McHardy <kaber@trash.net>
> CC: Jarek Poplawski <jarkao2@gmail.com>
> CC: jamal <hadi@cyberus.ca>
...
> --- a/net/sched/sch_drr.c 2011-01-14 09:19:00.830857886 -0800
> +++ b/net/sched/sch_drr.c 2011-01-14 09:28:20.398631228 -0800
> @@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *s
> }
>
> bstats_update(&cl->bstats, skb);
Why leave leaf classes with different stats?
Jarek P.
> - qdisc_bstats_update(sch, skb);
>
> sch->q.qlen++;
> return err;
...
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Changli Gao @ 2011-01-17 0:09 UTC
To: Jarek Poplawski
Cc: Stephen Hemminger, Eric Dumazet, David Miller, netdev,
Patrick McHardy, jamal
On Mon, Jan 17, 2011 at 6:35 AM, Jarek Poplawski <jarkao2@gmail.com> wrote:
> On Fri, Jan 14, 2011 at 11:03:42AM -0800, Stephen Hemminger wrote:
>> From Eric Dumazet <eric.dumazet@gmail.com>
>>
>> In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
>> a problem with pfifo_head drops that incorrectly decreased
>> sch->bstats.bytes and sch->bstats.packets.
>>
>> Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
>> previously enqueued packet, and bstats cannot be adjusted afterwards, so
>> bstats/rates are not accurate (overestimated).
>>
>> This patch changes the qdisc_bstats updates to be done at dequeue() time
>> instead of enqueue() time. bstats counters no longer account for dropped
>> frames, and rates are more correct, since enqueue() bursts don't affect
>> the dequeue() rate.
>>
>> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
>> Acked-by: Stephen Hemminger <shemminger@vyatta.com>
>>
>> CC: Patrick McHardy <kaber@trash.net>
>> CC: Jarek Poplawski <jarkao2@gmail.com>
>> CC: jamal <hadi@cyberus.ca>
> ...
>> --- a/net/sched/sch_drr.c 2011-01-14 09:19:00.830857886 -0800
>> +++ b/net/sched/sch_drr.c 2011-01-14 09:28:20.398631228 -0800
>> @@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *s
>> }
>>
>> bstats_update(&cl->bstats, skb);
>
> Why leave leaf classes with different stats?
>
HTB also has the same problem, and I have fixed it in my own product, but
the patch was rejected:
http://patchwork.ozlabs.org/patch/6227/
> Jarek P.
>
>> - qdisc_bstats_update(sch, skb);
>>
>> sch->q.qlen++;
>> return err;
> ...
--
Regards,
Changli Gao(xiaosuo@gmail.com)
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Eric Dumazet @ 2011-01-17 6:54 UTC
To: Jarek Poplawski
Cc: Stephen Hemminger, David Miller, netdev, Patrick McHardy, jamal
On Sunday 16 January 2011 at 23:35 +0100, Jarek Poplawski wrote:
> Why leave leaf classes with different stats?
>
I wanted to address this point in a followup patch, once general qdisc
stats changes are accepted.
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Eric Dumazet @ 2011-01-17 7:17 UTC
To: Changli Gao
Cc: Jarek Poplawski, Stephen Hemminger, David Miller, netdev,
Patrick McHardy, jamal
On Monday 17 January 2011 at 08:09 +0800, Changli Gao wrote:
> On Mon, Jan 17, 2011 at 6:35 AM, Jarek Poplawski <jarkao2@gmail.com> wrote:
> > On Fri, Jan 14, 2011 at 11:03:42AM -0800, Stephen Hemminger wrote:
> >> From Eric Dumazet <eric.dumazet@gmail.com>
> >>
> >> In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
> >> a problem with pfifo_head drops that incorrectly decreased
> >> sch->bstats.bytes and sch->bstats.packets.
> >>
> >> Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
> >> previously enqueued packet, and bstats cannot be adjusted afterwards, so
> >> bstats/rates are not accurate (overestimated).
> >>
> >> This patch changes the qdisc_bstats updates to be done at dequeue() time
> >> instead of enqueue() time. bstats counters no longer account for dropped
> >> frames, and rates are more correct, since enqueue() bursts don't affect
> >> the dequeue() rate.
> >>
> >> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> >> Acked-by: Stephen Hemminger <shemminger@vyatta.com>
> >>
> >> CC: Patrick McHardy <kaber@trash.net>
> >> CC: Jarek Poplawski <jarkao2@gmail.com>
> >> CC: jamal <hadi@cyberus.ca>
> > ...
> >> --- a/net/sched/sch_drr.c 2011-01-14 09:19:00.830857886 -0800
> >> +++ b/net/sched/sch_drr.c 2011-01-14 09:28:20.398631228 -0800
> >> @@ -376,7 +376,6 @@ static int drr_enqueue(struct sk_buff *s
> >> }
> >>
> >> bstats_update(&cl->bstats, skb);
> >
> > Why leave leaf classes with different stats?
> >
>
> HTB also has the same problem, and I have fixed in my own product, but
> the patch was rejected.
>
> http://patchwork.ozlabs.org/patch/6227/
>
Hmm, considering qdisc stats are not used in the kernel (only updated and
reported to tc users), it seems to me counting arrival instead of
departure rates is mostly useless for the user, if drops are ignored.
(I am not speaking of direct drops, when we try to enqueue() this skb,
but later ones, when another skb is enqueued and we drop a previously
enqueued skb.)
The user really wants to see the effective departure rate, to check the
qdisc parameters against kernel ones (HZ=100/1000, high-res timers
off/on, ...).
Arrival rates are of little use. However, it might be good to have a
second "bstats" only for dropped packets/bytes, or to extend bstats in a
compatible way (maybe adding fields to the end of the structure).
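One possible shape for such an extension (a purely hypothetical sketch;
the struct and field names are invented here, not an existing kernel
structure):

	/* hypothetical -- appends drop counters behind the old layout */
	struct gnet_stats_basic_ext {
		__u64	bytes;		/* existing bstats counters */
		__u32	packets;
		/* new fields at the end, keeping the old layout as a prefix: */
		__u64	drop_bytes;
		__u32	drop_packets;
	};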
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: Hagen Paul Pfeifer @ 2011-01-17 9:16 UTC
To: Eric Dumazet
Cc: Changli Gao, Jarek Poplawski, Stephen Hemminger, David Miller,
netdev, Patrick McHardy, jamal
* Eric Dumazet | 2011-01-17 08:17:49 [+0100]:
>Hmm, considering qdisc stats are not used in the kernel (only updated and
>reported to tc users), it seems to me counting arrival instead of
>departure rates is mostly useless for the user, if drops are ignored.
>
>(I am not speaking of direct drops, when we try to enqueue() this skb,
>but later ones, when another skb is enqueued and we drop a previously
>enqueued skb.)
>
>The user really wants to see the effective departure rate, to check the
>qdisc parameters against kernel ones (HZ=100/1000, high-res timers
>off/on, ...).
>
>Arrival rates are of little use. However, it might be good to have a
>second "bstats" only for dropped packets/bytes, or to extend bstats in a
>compatible way (maybe adding fields to the end of the structure).
Sure, qdiscs like CHOKe, SFQ, pfifo_head are only analyzable with this kind of
additional information. E.g. pfifo_head currently provides no statistic indicating
that the queue length is possibly underestimated and tuning is required.
Hagen
* Re: [PATCH] net_sched: accurate bytes/packets stats/rates
From: David Miller @ 2011-01-21 7:31 UTC
To: shemminger; +Cc: eric.dumazet, netdev, kaber, hadi, jarkao2
From: Stephen Hemminger <shemminger@vyatta.com>
Date: Fri, 14 Jan 2011 11:03:42 -0800
> From Eric Dumazet <eric.dumazet@gmail.com>
>
> In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
> a problem with pfifo_head drops that incorrectly decreased
> sch->bstats.bytes and sch->bstats.packets.
>
> Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
> previously enqueued packet, and bstats cannot be adjusted afterwards, so
> bstats/rates are not accurate (overestimated).
>
> This patch changes the qdisc_bstats updates to be done at dequeue() time
> instead of enqueue() time. bstats counters no longer account for dropped
> frames, and rates are more correct, since enqueue() bursts don't affect
> the dequeue() rate.
>
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Applied to net-2.6, thanks everyone.