netdev.vger.kernel.org archive mirror
* [PATCHSET 0/16] More gnet_stats conversions
@ 2004-10-21 12:32 Thomas Graf
  2004-10-21 12:33 ` [PATCH 1/16] PKT_SCHED: Requeues statistics Thomas Graf
                   ` (17 more replies)
  0 siblings, 18 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:32 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Dave,

This patchset contains requeue statistics, TLV type max cleanups, and
more conversions to the new generic statistics interface.

It converts all qdiscs to dump their xstats via gnet_stats_copy_app and
converts all class statistics in classful qdiscs to the new API.

The following patches must be applied as groups to avoid breaking
anything: 7-9, 10-12, and 13-15. All other patches keep the tree in a
working state on their own. As with the previous qdisc changes, full
backward compatibility is preserved and all existing applications will
continue to work.

All patches except patch 16 have been tested for 1.5 weeks. The
ATM patch was modified today and has only been compile-tested.

I will provide iproute2 patches for this soon.

Cheers.


* [PATCH 1/16] PKT_SCHED: Requeues statistics
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
@ 2004-10-21 12:33 ` Thomas Graf
  2004-10-21 12:34 ` [PATCH 2/16] PKT_SCHED: Max TLV types cleanup Thomas Graf
                   ` (16 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:33 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Introduces requeue statistics for qdiscs. The patch is based on a
patch by Jamal Hadi Salim and adapted to the new statistics API.

Signed-off-by: Thomas Graf <tgraf@suug.ch>


--- linux-2.6.9-rc5.orig/net/sched/sch_atm.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_atm.c	2004-10-21 11:01:21.000000000 +0200
@@ -545,8 +545,10 @@
 
 	D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
 	ret = p->link.q->ops->requeue(skb,p->link.q);
-	if (!ret) sch->q.qlen++;
-	else {
+	if (!ret) {
+        sch->q.qlen++;
+        sch->qstats.requeues++;
+    } else {
 		sch->qstats.drops++;
 		p->link.stats.drops++;
 	}
--- linux-2.6.9-rc5.orig/net/sched/sch_cbq.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_cbq.c	2004-10-21 10:52:50.000000000 +0200
@@ -485,6 +485,7 @@
 #endif
 	if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
 		sch->q.qlen++;
+		sch->qstats.requeues++;
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
 		return 0;
--- linux-2.6.9-rc5.orig/net/sched/sch_dsmark.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_dsmark.c	2004-10-21 10:52:50.000000000 +0200
@@ -297,6 +297,7 @@
 	D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
         if ((ret = p->q->ops->requeue(skb, p->q)) == 0) {
 		sch->q.qlen++;
+		sch->qstats.requeues++;
 		return 0;
 	}
 	sch->qstats.drops++;
--- linux-2.6.9-rc5.orig/net/sched/sch_fifo.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_fifo.c	2004-10-21 10:52:50.000000000 +0200
@@ -67,6 +67,7 @@
 {
 	__skb_queue_head(&sch->q, skb);
 	sch->qstats.backlog += skb->len;
+	sch->qstats.requeues++;
 	return 0;
 }
 
@@ -126,6 +127,7 @@
 pfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	__skb_queue_head(&sch->q, skb);
+	sch->qstats.requeues++;
 	return 0;
 }
 
--- linux-2.6.9-rc5.orig/net/sched/sch_generic.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_generic.c	2004-10-21 10:52:50.000000000 +0200
@@ -353,6 +353,7 @@
 
 	__skb_queue_head(list, skb);
 	qdisc->q.qlen++;
+	qdisc->qstats.requeues++;
 	return 0;
 }
 
--- linux-2.6.9-rc5.orig/net/sched/sch_gred.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_gred.c	2004-10-21 10:52:50.000000000 +0200
@@ -222,6 +222,7 @@
 
 	__skb_queue_head(&sch->q, skb);
 	sch->qstats.backlog += skb->len;
+	sch->qstats.requeues++;
 	q->backlog += skb->len;
 	return 0;
 }
--- linux-2.6.9-rc5.orig/net/sched/sch_hfsc.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_hfsc.c	2004-10-21 10:52:50.000000000 +0200
@@ -1779,6 +1779,7 @@
 
 	__skb_queue_head(&q->requeue, skb);
 	sch->q.qlen++;
+	sch->qstats.requeues++;
 	return NET_XMIT_SUCCESS;
 }
 
--- linux-2.6.9-rc5.orig/net/sched/sch_htb.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_htb.c	2004-10-21 10:52:50.000000000 +0200
@@ -794,6 +794,7 @@
 	    htb_activate (q,cl);
 
     sch->q.qlen++;
+    sch->qstats.requeues++;
     HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
     return NET_XMIT_SUCCESS;
 }
--- linux-2.6.9-rc5.orig/net/sched/sch_netem.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_netem.c	2004-10-21 10:52:50.000000000 +0200
@@ -211,8 +211,10 @@
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
+	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
 		sch->q.qlen++;
+		sch->qstats.requeues++;
+	}
 
 	return ret;
 }
--- linux-2.6.9-rc5.orig/net/sched/sch_prio.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_prio.c	2004-10-21 10:52:50.000000000 +0200
@@ -139,6 +139,7 @@
 
 	if ((ret = qdisc->ops->requeue(skb, qdisc)) == 0) {
 		sch->q.qlen++;
+		sch->qstats.requeues++;
 		return 0;
 	}
 dropped:
--- linux-2.6.9-rc5.orig/net/sched/sch_red.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_red.c	2004-10-21 10:52:50.000000000 +0200
@@ -309,6 +309,7 @@
 
 	__skb_queue_head(&sch->q, skb);
 	sch->qstats.backlog += skb->len;
+	sch->qstats.requeues++;
 	return 0;
 }
 
--- linux-2.6.9-rc5.orig/net/sched/sch_sfq.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_sfq.c	2004-10-21 10:52:50.000000000 +0200
@@ -310,8 +310,10 @@
 			q->tail = x;
 		}
 	}
-	if (++sch->q.qlen < q->limit - 1)
+	if (++sch->q.qlen < q->limit - 1) {
+		sch->qstats.requeues++;
 		return 0;
+	}
 
 	sch->qstats.drops++;
 	sfq_drop(sch);
--- linux-2.6.9-rc5.orig/net/sched/sch_tbf.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_tbf.c	2004-10-21 10:52:50.000000000 +0200
@@ -166,8 +166,10 @@
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
+	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
 		sch->q.qlen++;
+		sch->qstats.requeues++;
+	}
 
 	return ret;
 }
--- linux-2.6.9-rc5.orig/net/sched/sch_teql.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_teql.c	2004-10-21 10:52:50.000000000 +0200
@@ -113,6 +113,7 @@
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	__skb_queue_head(&q->q, skb);
+	sch->qstats.requeues++;
 	return 0;
 }
 


* [PATCH 2/16] PKT_SCHED: Max TLV types cleanup
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
  2004-10-21 12:33 ` [PATCH 1/16] PKT_SCHED: Requeues statistics Thomas Graf
@ 2004-10-21 12:34 ` Thomas Graf
  2004-10-21 12:36 ` [PATCH 3/16] PKT_SCHED: Add dump_stats qdisc op Thomas Graf
                   ` (15 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:34 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Cleans up the maximum TLV type of existing definitions and adds
the missing definitions for TBF, RED, GRED, and HTB.
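
For illustration only (not part of this patch): with the __TCA_FOO_MAX
sentinel pattern, attribute parsers can size and bound their TLV tables
consistently. A hypothetical snippet in iproute2 style, assuming "opt"
is the TCA_OPTIONS rtattr of an HTB class and using the parse_rtattr()
helper that also appears further down in this thread:

	/* table is sized by the sentinel-derived maximum */
	struct rtattr *tb[TCA_HTB_MAX + 1] = {0};

	parse_rtattr(tb, TCA_HTB_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt));
	if (tb[TCA_HTB_PARMS]) {
		/* ... use RTA_DATA(tb[TCA_HTB_PARMS]) ... */
	}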

Signed-off-by: Thomas Graf <tgraf@suug.ch>


--- linux-2.6.9-rc5.orig/include/linux/pkt_sched.h	2004-10-21 10:44:39.000000000 +0200
+++ linux-2.6.9-rc5/include/linux/pkt_sched.h	2004-10-21 11:09:53.000000000 +0200
@@ -117,8 +117,11 @@
 	TCA_TBF_PARMS,
 	TCA_TBF_RTAB,
 	TCA_TBF_PTAB,
+	__TCA_TBF_MAX,
 };
 
+#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
+
 
 /* TEQL section */
 
@@ -151,8 +154,11 @@
 	TCA_RED_UNSPEC,
 	TCA_RED_PARMS,
 	TCA_RED_STAB,
+	__TCA_RED_MAX,
 };
 
+#define TCA_RED_MAX (__TCA_RED_MAX - 1)
+
 struct tc_red_qopt
 {
 	__u32		limit;		/* HARD maximal queue length (bytes)	*/
@@ -183,8 +189,11 @@
        TCA_GRED_PARMS,
        TCA_GRED_STAB,
        TCA_GRED_DPS,
+	   __TCA_GRED_MAX,
 };
 
+#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+
 #define TCA_SET_OFF TCA_GRED_PARMS
 struct tc_gred_qopt
 {
@@ -249,7 +258,11 @@
 	TCA_HTB_INIT,
 	TCA_HTB_CTAB,
 	TCA_HTB_RTAB,
+	__TCA_HTB_MAX,
 };
+
+#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
+
 struct tc_htb_xstats
 {
 	__u32 lends;
@@ -287,9 +300,12 @@
 	TCA_HFSC_RSC,
 	TCA_HFSC_FSC,
 	TCA_HFSC_USC,
-	TCA_HFSC_MAX = TCA_HFSC_USC
+	__TCA_HFSC_MAX,
 };
 
+#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
+
+
 /* CBQ section */
 
 #define TC_CBQ_MAXPRIO		8
@@ -370,9 +386,10 @@
 	TCA_CBQ_RATE,
 	TCA_CBQ_RTAB,
 	TCA_CBQ_POLICE,
+	__TCA_CBQ_MAX,
 };
 
-#define TCA_CBQ_MAX	TCA_CBQ_POLICE
+#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)
 
 /* dsmark section */
 
@@ -382,10 +399,11 @@
 	TCA_DSMARK_DEFAULT_INDEX,
 	TCA_DSMARK_SET_TC_INDEX,
 	TCA_DSMARK_MASK,
-	TCA_DSMARK_VALUE
+	TCA_DSMARK_VALUE,
+	__TCA_DSMARK_MAX,
 };
 
-#define TCA_DSMARK_MAX TCA_DSMARK_VALUE
+#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
 
 /* ATM  section */
 
@@ -396,10 +414,11 @@
 	TCA_ATM_HDR,		/* LL header */
 	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
 	TCA_ATM_ADDR,		/* PVC address (for output only) */
-	TCA_ATM_STATE		/* VC state (ATM_VS_*; for output only) */
+	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
+	__TCA_ATM_MAX,
 };
 
-#define TCA_ATM_MAX	TCA_ATM_STATE
+#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)
 
 /* Network emulator */
 
@@ -408,9 +427,10 @@
 	TCA_NETEM_UNSPEC,
 	TCA_NETEM_CORR,
 	TCA_NETEM_DELAY_DIST,
+	__TCA_NETEM_MAX,
 };
 
-#define TCA_NETEM_MAX	TCA_NETEM_DELAY_DIST
+#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
 
 struct tc_netem_qopt
 {


* [PATCH 3/16] PKT_SCHED: Add dump_stats qdisc op
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
  2004-10-21 12:33 ` [PATCH 1/16] PKT_SCHED: Requeues statistics Thomas Graf
  2004-10-21 12:34 ` [PATCH 2/16] PKT_SCHED: Max TLV types cleanup Thomas Graf
@ 2004-10-21 12:36 ` Thomas Graf
  2004-10-21 12:37 ` [PATCH 4/16] CBQ: use dump_stats Thomas Graf
                   ` (14 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:36 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Adds a new qdisc operation, dump_stats, which qdiscs can use
to dump statistics. The op is invoked between gnet_stats_start_copy
and gnet_stats_finish_copy, so stats_lock is already held. This is
required to ensure proper locking throughout the whole statistics
dumping procedure.
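
For illustration only (not part of this patch), a minimal sketch of the
new op for a hypothetical "foo" qdisc whose private data carries an
xstats structure:

	static int foo_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
	{
		struct foo_sched_data *q = qdisc_priv(sch);

		/* stats_lock is already held by the caller between
		 * gnet_stats_start_copy and gnet_stats_finish_copy,
		 * no extra locking is needed here */
		return gnet_stats_copy_app(d, &q->xstats, sizeof(q->xstats));
	}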

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/include/net/pkt_sched.h	2004-10-21 10:44:44.000000000 +0200
+++ linux-2.6.9-rc5/include/net/pkt_sched.h	2004-10-21 11:11:45.000000000 +0200
@@ -64,6 +64,7 @@
 	int			(*change)(struct Qdisc *, struct rtattr *arg);
 
 	int			(*dump)(struct Qdisc *, struct sk_buff *);
+	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);
 
 	struct module		*owner;
 };
--- linux-2.6.9-rc5.orig/net/sched/sch_api.c	2004-10-21 10:45:08.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_api.c	2004-10-21 11:11:45.000000000 +0200
@@ -767,6 +767,9 @@
 			TCA_XSTATS, q->stats_lock, &d) < 0)
 		goto rtattr_failure;
 
+	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
+		goto rtattr_failure;
+
 	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
 #ifdef CONFIG_NET_ESTIMATOR
 	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||


* [PATCH 4/16] CBQ: use dump_stats
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (2 preceding siblings ...)
  2004-10-21 12:36 ` [PATCH 3/16] PKT_SCHED: Add dump_stats qdisc op Thomas Graf
@ 2004-10-21 12:37 ` Thomas Graf
  2004-10-21 12:38 ` [PATCH 5/16] RED: " Thomas Graf
                   ` (13 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:37 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes CBQ use the dump_stats qdisc op to provide xstats.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_cbq.c	2004-10-21 11:07:46.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_cbq.c	2004-10-21 11:20:06.000000000 +0200
@@ -1633,13 +1633,6 @@
 	if (cbq_dump_attr(skb, &q->link) < 0)
 		goto rtattr_failure;
 	rta->rta_len = skb->tail - b;
-	spin_lock_bh(&sch->dev->queue_lock);
-	q->link.xstats.avgidle = q->link.avgidle;
-	if (cbq_copy_xstats(skb, &q->link.xstats)) {
-		spin_unlock_bh(&sch->dev->queue_lock);
-		goto rtattr_failure;
-	}
-	spin_unlock_bh(&sch->dev->queue_lock);
 	return skb->len;
 
 rtattr_failure:
@@ -1648,6 +1641,15 @@
 }
 
 static int
+cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct cbq_sched_data *q = qdisc_priv(sch);
+
+	q->link.xstats.avgidle = q->link.avgidle;
+	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
+}
+
+static int
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	       struct sk_buff *skb, struct tcmsg *tcm)
 {
@@ -2133,6 +2135,7 @@
 	.destroy	=	cbq_destroy,
 	.change		=	NULL,
 	.dump		=	cbq_dump,
+	.dump_stats	=	cbq_dump_stats,
 	.owner		=	THIS_MODULE,
 };
 


* [PATCH 5/16] RED: use dump_stats
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (3 preceding siblings ...)
  2004-10-21 12:37 ` [PATCH 4/16] CBQ: use dump_stats Thomas Graf
@ 2004-10-21 12:38 ` Thomas Graf
  2004-10-21 12:39 ` [PATCH 6/16] PKT_SCHED: Add dump_stats class op Thomas Graf
                   ` (12 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:38 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes RED use the dump_stats qdisc op.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_red.c	2004-10-21 11:07:46.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_red.c	2004-10-21 12:49:10.000000000 +0200
@@ -396,16 +396,6 @@
 	return red_change(sch, opt);
 }
 
-
-int red_copy_xstats(struct sk_buff *skb, struct tc_red_xstats *st)
-{
-        RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
-        return 0;
-
-rtattr_failure:
-        return 1;
-}
-
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -425,9 +415,6 @@
 	RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
 	rta->rta_len = skb->tail - b;
 
-	if (red_copy_xstats(skb, &q->st))
-		goto rtattr_failure;
-
 	return skb->len;
 
 rtattr_failure:
@@ -435,6 +422,13 @@
 	return -1;
 }
 
+static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct red_sched_data *q = qdisc_priv(sch);
+
+	return gnet_stats_copy_app(d, &q->st, sizeof(q->st));
+}
+
 static struct Qdisc_ops red_qdisc_ops = {
 	.next		=	NULL,
 	.cl_ops		=	NULL,
@@ -448,6 +442,7 @@
 	.reset		=	red_reset,
 	.change		=	red_change,
 	.dump		=	red_dump,
+	.dump_stats	=	red_dump_stats,
 	.owner		=	THIS_MODULE,
 };
 


* [PATCH 6/16] PKT_SCHED: Add dump_stats class op
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (4 preceding siblings ...)
  2004-10-21 12:38 ` [PATCH 5/16] RED: " Thomas Graf
@ 2004-10-21 12:39 ` Thomas Graf
  2004-10-21 12:40 ` [PATCH 7/16] CBQ: Use gnet_stats for class statistics Thomas Graf
                   ` (11 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:39 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Adds a new class operation, dump_stats, which classes can use
to dump statistics. Unlike qdiscs, classes have no common
statistics, so every class must dump all of its statistics on its
own. The qdisc's stats_lock is used for locking.
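
For illustration only (not part of this patch), a rough sketch for a
hypothetical "foo" class; since there are no common statistics, the
class copies every statistics set itself:

	static int
	foo_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		struct gnet_dump *d)
	{
		struct foo_class *cl = (struct foo_class *)arg;

		/* basic, rate estimator and queue counters are all
		 * copied by the class itself, under stats_lock */
		if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
		    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
		    gnet_stats_copy_queue(d, &cl->qstats) < 0)
			return -1;

		return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
	}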

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/include/net/pkt_sched.h	2004-10-21 11:13:03.000000000 +0200
+++ linux-2.6.9-rc5/include/net/pkt_sched.h	2004-10-21 12:52:11.000000000 +0200
@@ -42,6 +42,7 @@
 
 	/* rtnetlink specific */
 	int			(*dump)(struct Qdisc *, unsigned long, struct sk_buff *skb, struct tcmsg*);
+	int			(*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *);
 };
 
 struct module;
--- linux-2.6.9-rc5.orig/net/sched/sch_api.c	2004-10-21 11:13:03.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_api.c	2004-10-21 12:52:11.000000000 +0200
@@ -985,6 +985,8 @@
 	struct tcmsg *tcm;
 	struct nlmsghdr  *nlh;
 	unsigned char	 *b = skb->tail;
+	struct gnet_dump d;
+	struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
 	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
 	nlh->nlmsg_flags = flags;
@@ -995,8 +997,19 @@
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = 0;
 	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
-	if (q->ops->cl_ops->dump && q->ops->cl_ops->dump(q, cl, skb, tcm) < 0)
+	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
 		goto rtattr_failure;
+
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
+			TCA_XSTATS, q->stats_lock, &d) < 0)
+		goto rtattr_failure;
+
+	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
+		goto rtattr_failure;
+
+	if (gnet_stats_finish_copy(&d) < 0)
+		goto rtattr_failure;
+
 	nlh->nlmsg_len = skb->tail - b;
 	return skb->len;
 


* [PATCH 7/16] CBQ: Use gnet_stats for class statistics
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (5 preceding siblings ...)
  2004-10-21 12:39 ` [PATCH 6/16] PKT_SCHED: Add dump_stats class op Thomas Graf
@ 2004-10-21 12:40 ` Thomas Graf
  2004-10-21 12:43 ` [PATCH 8/16] CBQ: Use dump_stats for class statistics dumping Thomas Graf
                   ` (10 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:40 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Converts CBQ class statistics counters to gnet_stats
structures.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_cbq.c	2004-10-21 12:45:47.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_cbq.c	2004-10-21 12:57:37.000000000 +0200
@@ -146,7 +146,9 @@
 	long			avgidle;
 	long			deficit;	/* Saved deficit for WRR */
 	unsigned long		penalized;
-	struct tc_stats		stats;
+	struct gnet_stats_basic bstats;
+	struct gnet_stats_queue qstats;
+	struct gnet_stats_rate_est rate_est;
 	spinlock_t		*stats_lock;
 	struct tc_cbq_xstats	xstats;
 
@@ -448,7 +450,7 @@
 		kfree_skb(skb);
 	else {
 		cbq_mark_toplevel(q, cl);
-		cl->stats.drops++;
+		cl->qstats.drops++;
 	}
 #else
 	if ( NET_XMIT_DROP == ret) {
@@ -457,7 +459,7 @@
 
 	if (cl != NULL) {
 		cbq_mark_toplevel(q, cl);
-		cl->stats.drops++;
+		cl->qstats.drops++;
 	}
 #endif
 	return ret;
@@ -491,7 +493,7 @@
 		return 0;
 	}
 	sch->qstats.drops++;
-	cl->stats.drops++;
+	cl->qstats.drops++;
 	return ret;
 }
 
@@ -789,8 +791,8 @@
 		long avgidle = cl->avgidle;
 		long idle;
 
-		cl->stats.packets++;
-		cl->stats.bytes += len;
+		cl->bstats.packets++;
+		cl->bstats.bytes += len;
 
 		/*
 		   (now - last) is total time between packet right edges.
@@ -888,7 +890,7 @@
 		   no another solution exists.
 		 */
 		if ((cl = cl->borrow) == NULL) {
-			this_cl->stats.overlimits++;
+			this_cl->qstats.overlimits++;
 			this_cl->overlimit(this_cl);
 			return NULL;
 		}


* [PATCH 8/16] CBQ: Use dump_stats for class statistics dumping
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (6 preceding siblings ...)
  2004-10-21 12:40 ` [PATCH 7/16] CBQ: Use gnet_stats for class statistics Thomas Graf
@ 2004-10-21 12:43 ` Thomas Graf
  2004-10-21 12:44 ` [PATCH 9/16] CBQ: Use generic rate estimator Thomas Graf
                   ` (9 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:43 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes CBQ use dump_stats to dump class statistics, i.e. it
moves the statistics dump code from cbq_dump_class to
cbq_dump_class_stats.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_cbq.c	2004-10-21 12:59:00.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_cbq.c	2004-10-21 12:59:32.000000000 +0200
@@ -1614,16 +1614,6 @@
 	return 0;
 }
 
-int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
-{
-	RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
-	return 0;
-
-rtattr_failure:
-	return -1;
-}
-
-
 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1655,7 +1645,6 @@
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	       struct sk_buff *skb, struct tcmsg *tcm)
 {
-	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class*)arg;
 	unsigned char	 *b = skb->tail;
 	struct rtattr *rta;
@@ -1672,25 +1661,35 @@
 	if (cbq_dump_attr(skb, cl) < 0)
 		goto rtattr_failure;
 	rta->rta_len = skb->tail - b;
-	cl->stats.qlen = cl->q->q.qlen;
-	if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock))
-		goto rtattr_failure;
-	spin_lock_bh(&sch->dev->queue_lock);
+	return skb->len;
+
+rtattr_failure:
+	skb_trim(skb, b - skb->data);
+	return -1;
+}
+
+static int
+cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+	struct gnet_dump *d)
+{
+	struct cbq_sched_data *q = qdisc_priv(sch);
+	struct cbq_class *cl = (struct cbq_class*)arg;
+
+	cl->qstats.qlen = cl->q->q.qlen;
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
+
 	if (!PSCHED_IS_PASTPERFECT(cl->undertime))
 		cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
-	if (cbq_copy_xstats(skb, &cl->xstats)) {
-		spin_unlock_bh(&sch->dev->queue_lock);
-		goto rtattr_failure;
-	}
-	spin_unlock_bh(&sch->dev->queue_lock);
 
-	return skb->len;
+	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+#endif
+	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
+		return -1;
 
-rtattr_failure:
-	skb_trim(skb, b - skb->data);
-	return -1;
+	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
 }
 
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
@@ -2121,6 +2120,7 @@
 	.bind_tcf	=	cbq_bind_filter,
 	.unbind_tcf	=	cbq_unbind_filter,
 	.dump		=	cbq_dump_class,
+	.dump_stats	=	cbq_dump_class_stats,
 };
 
 static struct Qdisc_ops cbq_qdisc_ops = {


* [PATCH 9/16] CBQ: Use generic rate estimator
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (7 preceding siblings ...)
  2004-10-21 12:43 ` [PATCH 8/16] CBQ: Use dump_stats for class statistics dumping Thomas Graf
@ 2004-10-21 12:44 ` Thomas Graf
  2004-10-21 12:45 ` [PATCH 10/16] HTB: Use gnet_stats for class statistics Thomas Graf
                   ` (8 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:44 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes CBQ use the generic rate estimator.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_cbq.c	2004-10-21 13:00:53.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_cbq.c	2004-10-21 13:01:20.000000000 +0200
@@ -1759,7 +1759,7 @@
 	qdisc_destroy(cl->q);
 	qdisc_put_rtab(cl->R_tab);
 #ifdef CONFIG_NET_ESTIMATOR
-	qdisc_kill_estimator(&cl->stats);
+	gen_kill_estimator(&cl->bstats, &cl->rate_est);
 #endif
 	if (cl != &q->link)
 		kfree(cl);
@@ -1905,11 +1905,9 @@
 		sch_tree_unlock(sch);
 
 #ifdef CONFIG_NET_ESTIMATOR
-		if (tca[TCA_RATE-1]) {
-			qdisc_kill_estimator(&cl->stats);
-			qdisc_new_estimator(&cl->stats, cl->stats_lock,
-					    tca[TCA_RATE-1]);
-		}
+		if (tca[TCA_RATE-1])
+			gen_replace_estimator(&cl->bstats, &cl->rate_est,
+				cl->stats_lock, tca[TCA_RATE-1]);
 #endif
 		return 0;
 	}
@@ -1999,8 +1997,8 @@
 
 #ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
-		qdisc_new_estimator(&cl->stats, cl->stats_lock,
-				    tca[TCA_RATE-1]);
+		gen_new_estimator(&cl->bstats, &cl->rate_est,
+			cl->stats_lock, tca[TCA_RATE-1]);
 #endif
 
 	*arg = (unsigned long)cl;


* [PATCH 10/16] HTB: Use gnet_stats for class statistics
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (8 preceding siblings ...)
  2004-10-21 12:44 ` [PATCH 9/16] CBQ: Use generic rate estimator Thomas Graf
@ 2004-10-21 12:45 ` Thomas Graf
  2004-10-21 12:46 ` [PATCH 11/16] HTB: Use dump_stats for class statistics dumping Thomas Graf
                   ` (7 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:45 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Converts HTB class statistic counters to gnet_stats
structures.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_htb.c	2004-10-21 11:07:46.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_htb.c	2004-10-21 13:03:29.000000000 +0200
@@ -142,7 +142,9 @@
 #endif
     /* general class parameters */
     u32 classid;
-    struct tc_stats	stats;	/* generic stats */
+    struct gnet_stats_basic bstats;
+    struct gnet_stats_queue qstats;
+    struct gnet_stats_rate_est rate_est;
     spinlock_t		*stats_lock;
     struct tc_htb_xstats xstats;/* our special stats */
     int refcnt;			/* usage count of this class */
@@ -754,10 +756,10 @@
 #endif
     else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
 	sch->qstats.drops++;
-	cl->stats.drops++;
+	cl->qstats.drops++;
 	return NET_XMIT_DROP;
     } else {
-	cl->stats.packets++; cl->stats.bytes += skb->len;
+	cl->bstats.packets++; cl->bstats.bytes += skb->len;
 	htb_activate (q,cl);
     }
 
@@ -788,7 +790,7 @@
 	}
     } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
 	sch->qstats.drops++;
-	cl->stats.drops++;
+	cl->qstats.drops++;
 	return NET_XMIT_DROP;
     } else 
 	    htb_activate (q,cl);
@@ -906,8 +908,8 @@
 
 		/* update byte stats except for leaves which are already updated */
 		if (cl->level) {
-			cl->stats.bytes += bytes;
-			cl->stats.packets++;
+			cl->bstats.bytes += bytes;
+			cl->bstats.packets++;
 		}
 		cl = cl->parent;
 	}


* [PATCH 11/16] HTB: Use dump_stats for class statistics dumping
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (9 preceding siblings ...)
  2004-10-21 12:45 ` [PATCH 10/16] HTB: Use gnet_stats for class statistics Thomas Graf
@ 2004-10-21 12:46 ` Thomas Graf
  2004-10-21 12:47 ` [PATCH 12/16] HTB: Remove unneeded rate estimator bits Thomas Graf
                   ` (6 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:46 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes HTB use dump_stats to dump class statistics, i.e.
moves the statistics dump code from htb_dump_class to
htb_dump_class_stats.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_htb.c	2004-10-21 13:05:35.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_htb.c	2004-10-21 13:06:07.000000000 +0200
@@ -1319,7 +1319,6 @@
 	struct rtattr *rta;
 	struct tc_htb_glob gopt;
 	HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
-	/* stats */
 	HTB_QLOCK(sch);
 	gopt.direct_pkts = q->direct_pkts;
 
@@ -1359,10 +1358,8 @@
 	HTB_QLOCK(sch);
 	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->classid;
-	if (!cl->level && cl->un.leaf.q) {
+	if (!cl->level && cl->un.leaf.q)
 		tcm->tcm_info = cl->un.leaf.q->handle;
-		cl->stats.qlen = cl->un.leaf.q->q.qlen;
-	}
 
 	rta = (struct rtattr*)b;
 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
@@ -1375,16 +1372,6 @@
 	opt.level = cl->level; 
 	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 	rta->rta_len = skb->tail - b;
-
-#ifdef HTB_RATECM
-	cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
-	cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
-#endif
-
-	cl->xstats.tokens = cl->tokens;
-	cl->xstats.ctokens = cl->ctokens;
-	RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
-	RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
 	HTB_QUNLOCK(sch);
 	return skb->len;
 rtattr_failure:
@@ -1393,6 +1380,30 @@
 	return -1;
 }
 
+static int
+htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+	struct gnet_dump *d)
+{
+	struct htb_class *cl = (struct htb_class*)arg;
+
+#ifdef HTB_RATECM
+	cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
+	cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
+#endif
+
+	if (!cl->level && cl->un.leaf.q)
+		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
+	cl->xstats.tokens = cl->tokens;
+	cl->xstats.ctokens = cl->ctokens;
+
+	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
+		return -1;
+
+	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
+}
+
 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	struct Qdisc **old)
 {
@@ -1747,6 +1758,7 @@
 	.bind_tcf	=	htb_bind_filter,
 	.unbind_tcf	=	htb_unbind_filter,
 	.dump		=	htb_dump_class,
+	.dump_stats	=	htb_dump_class_stats,
 };
 
 static struct Qdisc_ops htb_qdisc_ops = {


* [PATCH 12/16] HTB: Remove unneeded rate estimator bits
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (10 preceding siblings ...)
  2004-10-21 12:46 ` [PATCH 11/16] HTB: Use dump_stats for class statistics dumping Thomas Graf
@ 2004-10-21 12:47 ` Thomas Graf
  2004-10-21 12:48 ` [PATCH 13/16] HFSC: Use gnet_stats for class statistics Thomas Graf
                   ` (5 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:47 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Removes unused bits of the old generic rate estimator. HTB has its own
rate estimator routines, so no generic rate estimator is ever created
and there is no need to try to delete one. We might want to convert
HTB to the generic rate estimator at a later point.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_htb.c	2004-10-21 13:08:24.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_htb.c	2004-10-21 13:08:54.000000000 +0200
@@ -145,7 +145,6 @@
     struct gnet_stats_basic bstats;
     struct gnet_stats_queue qstats;
     struct gnet_stats_rate_est rate_est;
-    spinlock_t		*stats_lock;
     struct tc_htb_xstats xstats;/* our special stats */
     int refcnt;			/* usage count of this class */
 
@@ -1468,9 +1467,6 @@
 	qdisc_put_rtab(cl->rate);
 	qdisc_put_rtab(cl->ceil);
 	
-#ifdef CONFIG_NET_ESTIMATOR
-	qdisc_kill_estimator(&cl->stats);
-#endif
 	htb_destroy_filters (&cl->filter_list);
 	
 	while (!list_empty(&cl->children)) 


* [PATCH 13/16] HFSC: Use gnet_stats for class statistics
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (11 preceding siblings ...)
  2004-10-21 12:47 ` [PATCH 12/16] HTB: Remove unneeded rate estimator bits Thomas Graf
@ 2004-10-21 12:48 ` Thomas Graf
  2004-10-21 12:49 ` [PATCH 14/16] HFSC: Use generic rate estimator Thomas Graf
                   ` (4 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:48 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Converts HFSC class statistic counters to gnet_stats
structures.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_hfsc.c	2004-10-21 11:07:46.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_hfsc.c	2004-10-21 13:11:06.000000000 +0200
@@ -122,7 +122,9 @@
 	u32		classid;	/* class id */
 	unsigned int	refcnt;		/* usage count */
 
-	struct tc_stats	stats;		/* generic statistics */
+	struct gnet_stats_basic bstats;
+	struct gnet_stats_queue qstats;
+	struct gnet_stats_rate_est rate_est;
 	spinlock_t	*stats_lock;
 	unsigned int	level;		/* class level in hierarchy */
 	struct tcf_proto *filter_list;	/* filter list */
@@ -1686,7 +1688,7 @@
 
 	err = cl->qdisc->enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->stats.drops++;
+		cl->qstats.drops++;
 		sch->qstats.drops++;
 		return err;
 	}
@@ -1694,8 +1696,8 @@
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, len);
 
-	cl->stats.packets++;
-	cl->stats.bytes += len;
+	cl->bstats.packets++;
+	cl->bstats.bytes += len;
 	sch->bstats.packets++;
 	sch->bstats.bytes += len;
 	sch->q.qlen++;
@@ -1799,7 +1801,7 @@
 			} else {
 				list_move_tail(&cl->dlist, &q->droplist);
 			}
-			cl->stats.drops++;
+			cl->qstats.drops++;
 			sch->qstats.drops++;
 			sch->q.qlen--;
 			return len;


* [PATCH 14/16] HFSC: Use generic rate estimator
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (12 preceding siblings ...)
  2004-10-21 12:48 ` [PATCH 13/16] HFSC: Use gnet_stats for class statistics Thomas Graf
@ 2004-10-21 12:49 ` Thomas Graf
  2004-10-21 12:51 ` [PATCH 15/16] HFSC: Use dump_stats for class statistics dumping Thomas Graf
                   ` (3 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:49 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes HFSC use the generic rate estimator.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_hfsc.c	2004-10-21 13:12:24.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_hfsc.c	2004-10-21 13:12:53.000000000 +0200
@@ -1100,11 +1100,9 @@
 		sch_tree_unlock(sch);
 
 #ifdef CONFIG_NET_ESTIMATOR
-		if (tca[TCA_RATE-1]) {
-			qdisc_kill_estimator(&cl->stats);
-			qdisc_new_estimator(&cl->stats, cl->stats_lock,
-					    tca[TCA_RATE-1]);
-		}
+		if (tca[TCA_RATE-1])
+			gen_replace_estimator(&cl->bstats, &cl->rate_est,
+				cl->stats_lock, tca[TCA_RATE-1]);
 #endif
 		return 0;
 	}
@@ -1162,8 +1160,8 @@
 
 #ifdef CONFIG_NET_ESTIMATOR
 	if (tca[TCA_RATE-1])
-		qdisc_new_estimator(&cl->stats, cl->stats_lock,
-				    tca[TCA_RATE-1]);
+		gen_new_estimator(&cl->bstats, &cl->rate_est,
+			cl->stats_lock, tca[TCA_RATE-1]);
 #endif
 	*arg = (unsigned long)cl;
 	return 0;
@@ -1188,7 +1186,7 @@
 	hfsc_destroy_filters(&cl->filter_list);
 	qdisc_destroy(cl->qdisc);
 #ifdef CONFIG_NET_ESTIMATOR
-	qdisc_kill_estimator(&cl->stats);
+	gen_kill_estimator(&cl->bstats, &cl->rate_est);
 #endif
 	if (cl != &q->root)
 		kfree(cl);


* [PATCH 15/16] HFSC: Use dump_stats for class statistics dumping
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (13 preceding siblings ...)
  2004-10-21 12:49 ` [PATCH 14/16] HFSC: Use generic rate estimator Thomas Graf
@ 2004-10-21 12:51 ` Thomas Graf
  2004-10-21 12:52 ` [PATCH 16/16] ATM: Use gnet_stats for class statistics and dump them Thomas Graf
                   ` (2 subsequent siblings)
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:51 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes HFSC use dump_stats to dump class statistics, i.e.
moves the statistics dump code from hfsc_dump_class to
hfsc_dump_class_stats.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_hfsc.c	2004-10-21 13:13:16.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_hfsc.c	2004-10-21 13:13:46.000000000 +0200
@@ -1404,36 +1404,6 @@
 	return -1;
 }
 
-static inline int
-hfsc_dump_stats(struct sk_buff *skb, struct hfsc_class *cl)
-{
-	cl->stats.qlen = cl->qdisc->q.qlen;
-	if (qdisc_copy_stats(skb, &cl->stats, cl->stats_lock) < 0)
-		goto rtattr_failure;
-
-	return skb->len;
-
- rtattr_failure:
-	return -1;
-}
-
-static inline int
-hfsc_dump_xstats(struct sk_buff *skb, struct hfsc_class *cl)
-{
-	struct tc_hfsc_stats xstats;
-
-	xstats.level  = cl->level;
-	xstats.period = cl->cl_vtperiod;
-	xstats.work   = cl->cl_total;
-	xstats.rtwork = cl->cl_cumul;
-	RTA_PUT(skb, TCA_XSTATS, sizeof(xstats), &xstats);
-
-	return skb->len;
-
- rtattr_failure:
-	return -1;
-}
-
 static int
 hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                 struct tcmsg *tcm)
@@ -1451,11 +1421,6 @@
 	if (hfsc_dump_curves(skb, cl) < 0)
 		goto rtattr_failure;
 	rta->rta_len = skb->tail - b;
-
-	if ((hfsc_dump_stats(skb, cl) < 0) ||
-	    (hfsc_dump_xstats(skb, cl) < 0))
-		goto rtattr_failure;
-
 	return skb->len;
 
  rtattr_failure:
@@ -1463,6 +1428,31 @@
 	return -1;
 }
 
+static int
+hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+	struct gnet_dump *d)
+{
+	struct hfsc_class *cl = (struct hfsc_class *)arg;
+	struct tc_hfsc_stats xstats;
+
+	cl->qstats.qlen = cl->qdisc->q.qlen;
+	xstats.level   = cl->level;
+	xstats.period  = cl->cl_vtperiod;
+	xstats.work    = cl->cl_total;
+	xstats.rtwork  = cl->cl_cumul;
+
+	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+#endif
+	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
+		return -1;
+
+	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+
+
 static void
 hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
@@ -1819,6 +1809,7 @@
 	.unbind_tcf	= hfsc_unbind_tcf,
 	.tcf_chain	= hfsc_tcf_chain,
 	.dump		= hfsc_dump_class,
+	.dump_stats	= hfsc_dump_class_stats,
 	.walk		= hfsc_walk
 };
 


* [PATCH 16/16] ATM: Use gnet_stats for class statistics and dump them
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (14 preceding siblings ...)
  2004-10-21 12:51 ` [PATCH 15/16] HFSC: Use dump_stats for class statistics dumping Thomas Graf
@ 2004-10-21 12:52 ` Thomas Graf
  2004-10-21 15:42   ` [RESEND " Thomas Graf
  2004-10-21 19:42 ` [PATCH] iproute2: support generic statistics and add requeues statistics Thomas Graf
  2004-10-22  5:48 ` [PATCHSET 0/16] More gnet_stats conversions David S. Miller
  17 siblings, 1 reply; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 12:52 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Makes the ATM qdisc use gnet_stats for class statistic counters and
adds dumping bits to actually dump them. Although the counters were
updated, they never got dumped to userspace.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-rc5.orig/net/sched/sch_atm.c	2004-10-21 11:07:46.000000000 +0200
+++ linux-2.6.9-rc5/net/sched/sch_atm.c	2004-10-21 13:26:05.000000000 +0200
@@ -69,7 +69,8 @@
 	struct socket		*sock;		/* for closing */
 	u32			classid;	/* x:y type ID */
 	int			ref;		/* reference count */
-	struct tc_stats		stats;
+	struct gnet_stats_basic	bstats;
+	struct gnet_stats_queue	qstats;
 	spinlock_t		*stats_lock;
 	struct atm_flow_data	*next;
 	struct atm_flow_data	*excess;	/* flow for excess traffic;
@@ -450,13 +451,13 @@
 #endif
 	    (ret = flow->q->enqueue(skb,flow->q)) != 0) {
 		sch->qstats.drops++;
-		if (flow) flow->stats.drops++;
+		if (flow) flow->qstats.drops++;
 		return ret;
 	}
 	sch->bstats.bytes += skb->len;
 	sch->bstats.packets++;
-	flow->stats.bytes += skb->len;
-	flow->stats.packets++;
+	flow->bstats.bytes += skb->len;
+	flow->bstats.packets++;
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
@@ -550,7 +551,7 @@
         sch->qstats.requeues++;
     } else {
 		sch->qstats.drops++;
-		p->link.stats.drops++;
+		p->link.qstats.drops++;
 	}
 	return ret;
 }
@@ -666,6 +667,21 @@
 	skb_trim(skb,b-skb->data);
 	return -1;
 }
+static int
+atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+	struct gnet_dump *d)
+{
+	struct atm_qdisc_data *p = PRIV(sch);
+	struct atm_flow_data *flow = (struct atm_flow_data *) arg;
+
+	flow->qstats.qlen = flow->q->q.qlen;
+
+	if (gnet_stats_copy_basic(d, &flow->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &flow->qstats) < 0)
+		return -1;
+
+	return 0;
+}
 
 static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
@@ -684,6 +700,7 @@
 	.bind_tcf	=	atm_tc_bind_filter,
 	.unbind_tcf	=	atm_tc_put,
 	.dump		=	atm_tc_dump_class,
+	.dump_stats	=	atm_tc_dump_class_stats,
 };
 
 static struct Qdisc_ops atm_qdisc_ops = {


* [RESEND 16/16] ATM: Use gnet_stats for class statistics and dump them
  2004-10-21 12:52 ` [PATCH 16/16] ATM: Use gnet_stats for class statistics and dump them Thomas Graf
@ 2004-10-21 15:42   ` Thomas Graf
  0 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 15:42 UTC (permalink / raw)
  To: David S. Miller; +Cc: netdev, hadi

Braindead as I am, I forgot to tell my text editor to write the file
before diffing it, so an unused variable made it into the patch.
Revised version:

Makes the ATM qdisc use gnet_stats for class statistic counters and
adds dumping bits to actually dump them. Although the counters were
updated, they never got dumped to userspace.

Signed-off-by: Thomas Graf <tgraf@suug.ch>

--- linux-2.6.9-bk6.orig/net/sched/sch_atm.c	2004-10-21 17:30:11.000000000 +0200
+++ linux-2.6.9-bk6/net/sched/sch_atm.c	2004-10-21 17:29:28.000000000 +0200
@@ -69,7 +69,8 @@
 	struct socket		*sock;		/* for closing */
 	u32			classid;	/* x:y type ID */
 	int			ref;		/* reference count */
-	struct tc_stats		stats;
+	struct gnet_stats_basic	bstats;
+	struct gnet_stats_queue	qstats;
 	spinlock_t		*stats_lock;
 	struct atm_flow_data	*next;
 	struct atm_flow_data	*excess;	/* flow for excess traffic;
@@ -450,13 +451,13 @@
 #endif
 	    (ret = flow->q->enqueue(skb,flow->q)) != 0) {
 		sch->qstats.drops++;
-		if (flow) flow->stats.drops++;
+		if (flow) flow->qstats.drops++;
 		return ret;
 	}
 	sch->bstats.bytes += skb->len;
 	sch->bstats.packets++;
-	flow->stats.bytes += skb->len;
-	flow->stats.packets++;
+	flow->bstats.bytes += skb->len;
+	flow->bstats.packets++;
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
@@ -550,7 +551,7 @@
         sch->qstats.requeues++;
     } else {
 		sch->qstats.drops++;
-		p->link.stats.drops++;
+		p->link.qstats.drops++;
 	}
 	return ret;
 }
@@ -666,6 +667,20 @@
 	skb_trim(skb,b-skb->data);
 	return -1;
 }
+static int
+atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+	struct gnet_dump *d)
+{
+	struct atm_flow_data *flow = (struct atm_flow_data *) arg;
+
+	flow->qstats.qlen = flow->q->q.qlen;
+
+	if (gnet_stats_copy_basic(d, &flow->bstats) < 0 ||
+	    gnet_stats_copy_queue(d, &flow->qstats) < 0)
+		return -1;
+
+	return 0;
+}
 
 static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
@@ -684,6 +699,7 @@
 	.bind_tcf	=	atm_tc_bind_filter,
 	.unbind_tcf	=	atm_tc_put,
 	.dump		=	atm_tc_dump_class,
+	.dump_stats	=	atm_tc_dump_class_stats,
 };
 
 static struct Qdisc_ops atm_qdisc_ops = {


* [PATCH] iproute2: support generic statistics and add requeues statistics
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (15 preceding siblings ...)
  2004-10-21 12:52 ` [PATCH 16/16] ATM: Use gnet_stats for class statistics and dump them Thomas Graf
@ 2004-10-21 19:42 ` Thomas Graf
  2004-10-22  5:48 ` [PATCHSET 0/16] More gnet_stats conversions David S. Miller
  17 siblings, 0 replies; 21+ messages in thread
From: Thomas Graf @ 2004-10-21 19:42 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: netdev, hadi

Stephen,

Here is a patch for iproute2 adding support for the new statistics
interface while remaining backward compatible with the old statistic
TLVs. It contains the following changes:

o Add linux/gen_stats.h to header files
o Update linux/rtnetlink.h to contain TCA_STATS2 TLV type
o Modify print_tcstats_attr to take a TLV array, a prefix string, and
  an xstats TLV result pointer. It searches for TCA_STATS2 in the
  TLV array and falls back to TCA_STATS if it is absent. If TCA_STATS2
  is present it looks for TCA_STATS_APP and, if found, returns it as
  xstats; otherwise it falls back to TCA_XSTATS. The prefix is
  printed on every new line to allow proper indenting.
o Modify all callers of print_tcstats_attr to provide the new
  attributes and an xstats result pointer where needed. The xstats
  result pointer is used if non-NULL; otherwise the caller falls back
  to TCA_XSTATS to handle the unlikely case where TCA_STATS and
  TCA_STATS2 are not present but TCA_XSTATS is.

I tried to stick to the old dumping format so that the new requeues
statistics do not break too many scripts.
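
For reference, the nesting that print_tcstats_attr expects inside
TCA_STATS2 (per linux/gen_stats.h below) looks like this:

	TCA_STATS2
	    TCA_STATS_BASIC     struct gnet_stats_basic
	    TCA_STATS_RATE_EST  struct gnet_stats_rate_est
	    TCA_STATS_QUEUE     struct gnet_stats_queue
	    TCA_STATS_APP       qdisc/class specific xstats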

diff -Nru iproute2-2.6.9.orig/include/linux/gen_stats.h iproute2-2.6.9/include/linux/gen_stats.h
--- iproute2-2.6.9.orig/include/linux/gen_stats.h	1970-01-01 01:00:00.000000000 +0100
+++ iproute2-2.6.9/include/linux/gen_stats.h	2004-10-21 18:17:31.000000000 +0200
@@ -0,0 +1,62 @@
+#ifndef __LINUX_GEN_STATS_H
+#define __LINUX_GEN_STATS_H
+
+#include <linux/types.h>
+
+enum {
+	TCA_STATS_UNSPEC,
+	TCA_STATS_BASIC,
+	TCA_STATS_RATE_EST,
+	TCA_STATS_QUEUE,
+	TCA_STATS_APP,
+	__TCA_STATS_MAX,
+};
+#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
+
+/**
+ * @bytes: number of seen bytes
+ * @packets: number of seen packets
+ */
+struct gnet_stats_basic
+{
+	__u64	bytes;
+	__u32	packets;
+};
+
+/**
+ * @bps: current byte rate
+ * @pps: current packet rate
+ */
+struct gnet_stats_rate_est
+{
+	__u32	bps;
+	__u32	pps;
+};
+
+/**
+ * @qlen: queue length
+ * @backlog: backlog size of queue
+ * @drops: number of dropped packets
+ * @requeues: number of requeues
+ */
+struct gnet_stats_queue
+{
+	__u32	qlen;
+	__u32	backlog;
+	__u32	drops;
+	__u32	requeues;
+	__u32	overlimits;
+};
+
+/**
+ * @interval: sampling period
+ * @ewma_log: the log of measurement window weight
+ */
+struct gnet_estimator
+{
+	signed char	interval;
+	unsigned char	ewma_log;
+};
+
+
+#endif /* __LINUX_GEN_STATS_H */
diff -Nru iproute2-2.6.9.orig/include/linux/rtnetlink.h iproute2-2.6.9/include/linux/rtnetlink.h
--- iproute2-2.6.9.orig/include/linux/rtnetlink.h	2004-10-19 22:49:02.000000000 +0200
+++ iproute2-2.6.9/include/linux/rtnetlink.h	2004-10-21 18:17:44.000000000 +0200
@@ -698,6 +698,7 @@
 	TCA_XSTATS,
 	TCA_RATE,
 	TCA_FCNT,
+	TCA_STATS2,
 	__TCA_MAX
 };
 
diff -Nru iproute2-2.6.9.orig/tc/m_action.c iproute2-2.6.9/tc/m_action.c
--- iproute2-2.6.9.orig/tc/m_action.c	2004-10-19 22:49:02.000000000 +0200
+++ iproute2-2.6.9/tc/m_action.c	2004-10-21 21:24:04.000000000 +0200
@@ -261,10 +261,11 @@
 	if (0 > err)
 		return err;
 
-	if (show_stats && tb[TCA_STATS]) {
-		fprintf(f, "\t");
-		print_tcstats_attr(f, tb[TCA_STATS]);
-		fprintf(f, "\n");
+	if (show_stats) {
+		if (tb[TCA_STATS] || tb[TCA_STATS2]) {
+			print_tcstats_attr(f, tb, "\t", NULL);
+			fprintf(f, "\n");
+		}
 	}
 
 	return 0;
diff -Nru iproute2-2.6.9.orig/tc/tc_class.c iproute2-2.6.9/tc/tc_class.c
--- iproute2-2.6.9.orig/tc/tc_class.c	2004-10-19 22:49:02.000000000 +0200
+++ iproute2-2.6.9/tc/tc_class.c	2004-10-21 21:21:33.000000000 +0200
@@ -216,12 +216,14 @@
 	}
 	fprintf(fp, "\n");
 	if (show_stats) {
-		if (tb[TCA_STATS]) {
-			print_tcstats_attr(fp, tb[TCA_STATS]);
+		struct rtattr *xstats = NULL;
+		
+		if (tb[TCA_STATS] || tb[TCA_STATS2]) {
+			print_tcstats_attr(fp, tb, " ", &xstats);
 			fprintf(fp, "\n");
 		}
-		if (q && tb[TCA_XSTATS] && q->print_xstats) {
-			q->print_xstats(q, fp, tb[TCA_XSTATS]);
+		if (q && (xstats || tb[TCA_XSTATS]) && q->print_xstats) {
+			q->print_xstats(q, fp, xstats ? : tb[TCA_XSTATS]);
 			fprintf(fp, "\n");
 		}
 	}
diff -Nru iproute2-2.6.9.orig/tc/tc_filter.c iproute2-2.6.9/tc/tc_filter.c
--- iproute2-2.6.9.orig/tc/tc_filter.c	2004-10-19 22:49:02.000000000 +0200
+++ iproute2-2.6.9/tc/tc_filter.c	2004-10-21 21:22:27.000000000 +0200
@@ -254,8 +254,8 @@
 	}
 	fprintf(fp, "\n");
 
-	if (show_stats && tb[TCA_STATS]) {
-		print_tcstats_attr(fp, tb[TCA_STATS]);
+	if (show_stats && (tb[TCA_STATS] || tb[TCA_STATS2])) {
+		print_tcstats_attr(fp, tb, " ", NULL);
 		fprintf(fp, "\n");
 	}
 
diff -Nru iproute2-2.6.9.orig/tc/tc_qdisc.c iproute2-2.6.9/tc/tc_qdisc.c
--- iproute2-2.6.9.orig/tc/tc_qdisc.c	2004-10-19 22:49:02.000000000 +0200
+++ iproute2-2.6.9/tc/tc_qdisc.c	2004-10-21 21:22:56.000000000 +0200
@@ -168,36 +168,88 @@
 }
 
 
-void print_tcstats_attr(FILE *fp, const struct rtattr *rta)
+void print_tcstats_attr(FILE *fp, struct rtattr *tb[], char *prefix, struct rtattr **xstats)
 {
-	struct tc_stats st;
 	SPRINT_BUF(b1);
 
-	/* handle case where kernel returns more/less than we know about */
-	memset(&st, 0, sizeof(st));
-	memcpy(&st, RTA_DATA(rta), MIN(RTA_PAYLOAD(rta), sizeof(st)));
-
-	fprintf(fp, " Sent %llu bytes %u pkts (dropped %u, overlimits %u) ",
-		(unsigned long long)st.bytes, st.packets, st.drops, 
-		st.overlimits);
-
-	if (st.bps || st.pps || st.qlen || st.backlog) {
-		fprintf(fp, "\n ");
-		if (st.bps || st.pps) {
-			fprintf(fp, "rate ");
-			if (st.bps)
-				fprintf(fp, "%s ", sprint_rate(st.bps, b1));
-			if (st.pps)
-				fprintf(fp, "%upps ", st.pps);
-		}
-		if (st.qlen || st.backlog) {
-			fprintf(fp, "backlog ");
-			if (st.backlog)
-				fprintf(fp, "%s ", sprint_size(st.backlog, b1));
-			if (st.qlen)
-				fprintf(fp, "%up ", st.qlen);
+	if (tb[TCA_STATS2]) {
+		struct rtattr *tbs[TCA_STATS_MAX + 1] = {0};
+
+		parse_rtattr(tbs, TCA_STATS_MAX, RTA_DATA(tb[TCA_STATS2]),
+			RTA_PAYLOAD(tb[TCA_STATS2]));
+
+		if (tbs[TCA_STATS_BASIC]) {
+			struct gnet_stats_basic bs = {0};
+			memcpy(&bs, RTA_DATA(tbs[TCA_STATS_BASIC]), MIN(RTA_PAYLOAD(tbs[TCA_STATS_BASIC]), sizeof(bs)));
+			fprintf(fp, "%sSent %llu bytes %u pkt",
+				prefix, bs.bytes, bs.packets);
+		}
+
+		if (tbs[TCA_STATS_QUEUE]) {
+			struct gnet_stats_queue q = {0};
+			memcpy(&q, RTA_DATA(tbs[TCA_STATS_QUEUE]), MIN(RTA_PAYLOAD(tbs[TCA_STATS_QUEUE]), sizeof(q)));
+			fprintf(fp, " (dropped %u, overlimits %u requeues %u) ",
+				q.drops, q.overlimits, q.requeues);
+		}
+				
+		if (tbs[TCA_STATS_RATE_EST]) {
+			struct gnet_stats_rate_est re = {0};
+			memcpy(&re, RTA_DATA(tbs[TCA_STATS_RATE_EST]), MIN(RTA_PAYLOAD(tbs[TCA_STATS_RATE_EST]), sizeof(re)));
+			fprintf(fp, "\n%srate %s %upps ",
+				prefix, sprint_rate(re.bps, b1), re.pps);
+		}
+
+		if (tbs[TCA_STATS_QUEUE]) {
+			struct gnet_stats_queue q = {0};
+			memcpy(&q, RTA_DATA(tbs[TCA_STATS_QUEUE]), MIN(RTA_PAYLOAD(tbs[TCA_STATS_QUEUE]), sizeof(q)));
+			if (!tbs[TCA_STATS_RATE_EST])
+				fprintf(fp, "\n%s", prefix);
+			fprintf(fp, "backlog %s %up requeues %u ",
+				sprint_size(q.backlog, b1), q.qlen, q.requeues);
+		}
+
+		if (tbs[TCA_STATS_APP]) {
+			if (xstats) 
+				*xstats = tbs[TCA_STATS_APP];
+		} else
+			goto compat_xstats;
+
+		return;
+	}
+	/* backward compatibility */
+	if (tb[TCA_STATS]) {
+		struct tc_stats st;
+
+		/* handle case where kernel returns more/less than we know about */
+		memset(&st, 0, sizeof(st));
+		memcpy(&st, RTA_DATA(tb[TCA_STATS]), MIN(RTA_PAYLOAD(tb[TCA_STATS]), sizeof(st)));
+
+		fprintf(fp, "%sSent %llu bytes %u pkts (dropped %u, overlimits %u) ",
+			prefix, (unsigned long long)st.bytes, st.packets, st.drops, 
+			st.overlimits);
+
+		if (st.bps || st.pps || st.qlen || st.backlog) {
+			fprintf(fp, "\n%s", prefix);
+			if (st.bps || st.pps) {
+				fprintf(fp, "rate ");
+				if (st.bps)
+					fprintf(fp, "%s ", sprint_rate(st.bps, b1));
+				if (st.pps)
+					fprintf(fp, "%upps ", st.pps);
+			}
+			if (st.qlen || st.backlog) {
+				fprintf(fp, "backlog ");
+				if (st.backlog)
+					fprintf(fp, "%s ", sprint_size(st.backlog, b1));
+				if (st.qlen)
+					fprintf(fp, "%up ", st.qlen);
+			}
 		}
 	}
+
+compat_xstats:
+	if (tb[TCA_XSTATS] && xstats)
+		*xstats = tb[TCA_XSTATS];
 }
 
 static int filter_ifindex;
@@ -264,13 +316,15 @@
 	}
 	fprintf(fp, "\n");
 	if (show_stats) {
-		if (tb[TCA_STATS]) {
-			print_tcstats_attr(fp, tb[TCA_STATS]);
+		struct rtattr *xstats = NULL;
+
+		if (tb[TCA_STATS] || tb[TCA_STATS2]) {
+			print_tcstats_attr(fp, tb, " ", &xstats);
 			fprintf(fp, "\n");
 		}
 
-		if (q && tb[TCA_XSTATS] && q->print_xstats) {
-			q->print_xstats(q, fp, tb[TCA_XSTATS]);
+		if (q && (xstats || tb[TCA_XSTATS]) && q->print_xstats) {
+			q->print_xstats(q, fp, xstats ? : tb[TCA_XSTATS]);
 			fprintf(fp, "\n");
 		}
 	}
diff -Nru iproute2-2.6.9.orig/tc/tc_util.h iproute2-2.6.9/tc/tc_util.h
--- iproute2-2.6.9.orig/tc/tc_util.h	2004-10-19 22:49:02.000000000 +0200
+++ iproute2-2.6.9/tc/tc_util.h	2004-10-21 21:15:18.000000000 +0200
@@ -4,6 +4,7 @@
 #define MAX_MSG 16384
 #include <linux/pkt_sched.h>
 #include <linux/pkt_cls.h>
+#include <linux/gen_stats.h>
 #include "tc_core.h"
 
 struct qdisc_util
@@ -58,7 +59,7 @@
 extern char * sprint_usecs(__u32 usecs, char *buf);
 extern char * sprint_percent(__u32 percent, char *buf);
 
-extern void print_tcstats_attr(FILE *fp, const struct rtattr *ts);
+extern void print_tcstats_attr(FILE *fp, struct rtattr *tb[], char *prefix, struct rtattr **xstats);
 
 extern int get_tc_classid(__u32 *h, const char *str);
 extern int print_tc_classid(char *buf, int len, __u32 h);


* Re: [PATCHSET 0/16] More gnet_stats conversions
  2004-10-21 12:32 [PATCHSET 0/16] More gnet_stats conversions Thomas Graf
                   ` (16 preceding siblings ...)
  2004-10-21 19:42 ` [PATCH] iproute2: support generic statistics and add requeues statistics Thomas Graf
@ 2004-10-22  5:48 ` David S. Miller
  2004-10-22 11:08   ` jamal
  17 siblings, 1 reply; 21+ messages in thread
From: David S. Miller @ 2004-10-22  5:48 UTC (permalink / raw)
  To: Thomas Graf; +Cc: netdev, hadi

On Thu, 21 Oct 2004 14:32:09 +0200
Thomas Graf <tgraf@suug.ch> wrote:

> This patchset contains requeues statistics, TLV type max cleanups, and
> more conversions to the new generic statistic interface.
> 
> It converts all qdiscs to dump their xstats via gnet_stats_copy_app and
> converts all class statistics in classful qdiscs to the new API.

Thomas has been a very busy boy :-)

All nice and straightforward transformations, nice work.

All patches applied, thanks Thomas.


* Re: [PATCHSET 0/16] More gnet_stats conversions
  2004-10-22  5:48 ` [PATCHSET 0/16] More gnet_stats conversions David S. Miller
@ 2004-10-22 11:08   ` jamal
  0 siblings, 0 replies; 21+ messages in thread
From: jamal @ 2004-10-22 11:08 UTC (permalink / raw)
  To: David S. Miller; +Cc: Thomas Graf, netdev

On Fri, 2004-10-22 at 01:48, David S. Miller wrote:

> Thomas has been a very busy boy :-)

Yes, he has been ;->

> All nice and straightforward transformations, nice work.

Good work Thomas.

cheers,
jamal

