netdev.vger.kernel.org archive mirror
* [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation
@ 2008-01-20 18:28 Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 01/05]: Consolidate default fifo setup Patrick McHardy
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:28 UTC (permalink / raw)
  To: netdev; +Cc: Patrick McHardy

These patches contain some preparatory cleanups and consolidate the
Qdisc_class_ops for pseudo-classful qdiscs. The main reason for the
RFC is that the naming is not particularly appealing (both qdisc_q_...
and sch_pseudo_classful); suggestions for better names are welcome.
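
For orientation, a qdisc converted by this series ends up looking roughly
like the sketch below (based on the sch_netem/sch_red/sch_tbf conversions
in patch 5; the "foo" names are made up). Note that struct pc_sched_data
needs to be the first member of the private data, since pseudo_classful_ops
reaches it through qdisc_priv():

struct foo_sched_data {
	struct pc_sched_data	class;	/* child qdisc; must stay the first member */
	/* ... remaining private state of the qdisc ... */
};

static struct sk_buff *foo_dequeue(struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = qdisc_dequeue(q->class.qdisc);	/* child dispatch helper from patch 3 */
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct Qdisc_ops foo_qdisc_ops __read_mostly = {
	.id		=	"foo",
	.cl_ops		=	&pseudo_classful_ops,	/* shared class ops from patch 5 */
	.priv_size	=	sizeof(struct foo_sched_data),
	.dequeue	=	foo_dequeue,
	/* ... enqueue/requeue/drop/init/destroy as before ... */
};

The per-qdisc class ops boilerplate (~90 lines each in netem, red and tbf)
then collapses into the shared implementation in sch_pseudo_classful.c.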


 include/net/pkt_sched.h         |    4 +
 include/net/sch_generic.h       |   78 +++++++++++++-------
 net/core/dev.c                  |    4 +-
 net/sched/Kconfig               |    6 ++
 net/sched/Makefile              |    1 +
 net/sched/sch_atm.c             |   17 +++--
 net/sched/sch_blackhole.c       |    2 +-
 net/sched/sch_cbq.c             |   18 +++--
 net/sched/sch_dsmark.c          |   11 +--
 net/sched/sch_fifo.c            |   62 +++++++++++++---
 net/sched/sch_generic.c         |   12 ++--
 net/sched/sch_gred.c            |   18 ++--
 net/sched/sch_hfsc.c            |   12 ++--
 net/sched/sch_htb.c             |   11 ++-
 net/sched/sch_netem.c           |  157 +++++++--------------------------------
 net/sched/sch_prio.c            |   13 ++-
 net/sched/sch_pseudo_classful.c |  101 +++++++++++++++++++++++++
 net/sched/sch_red.c             |  152 ++++++--------------------------------
 net/sched/sch_sfq.c             |    2 +-
 net/sched/sch_tbf.c             |  150 +++++--------------------------------
 20 files changed, 347 insertions(+), 484 deletions(-)
 create mode 100644 net/sched/sch_pseudo_classful.c

Patrick McHardy (5):
      [NET_SCHED]: Consolidate default fifo setup
      [NET_SCHED]: Rename qdisc helpers for built-in queue
      [NET_SCHED]: Introduce child qdisc helpers
      [NET_SCHED]: Use qdisc helpers
      [NET_SCHED]: Consolidate class ops for pseudo classful qdisc


* [RFC NET_SCHED 01/05]: Consolidate default fifo setup
  2008-01-20 18:28 [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation Patrick McHardy
@ 2008-01-20 18:28 ` Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 02/05]: Rename qdisc helpers for built-in queue Patrick McHardy
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:28 UTC (permalink / raw)
  To: netdev; +Cc: Patrick McHardy

commit e56c933715900be7c6ad30bd07d342d31c457112
Author: Patrick McHardy <kaber@trash.net>
Date:   Wed Jan 2 21:35:19 2008 +0100

    [NET_SCHED]: Consolidate default fifo setup
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>

diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ab61809..9d06d2d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -72,6 +72,10 @@ extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
 extern struct Qdisc_ops pfifo_qdisc_ops;
 extern struct Qdisc_ops bfifo_qdisc_ops;
 
+extern int fifo_set_limit(struct Qdisc *q, unsigned int limit);
+extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+				      unsigned int limit);
+
 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index d71dbfc..f9bf58b 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -108,3 +108,45 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
 
 EXPORT_SYMBOL(bfifo_qdisc_ops);
 EXPORT_SYMBOL(pfifo_qdisc_ops);
+
+/* Pass size change message down to embedded FIFO */
+int fifo_set_limit(struct Qdisc *q, unsigned int limit)
+{
+	struct rtattr *rta;
+	int ret = -ENOMEM;
+
+	/* Hack to avoid sending change message to non-FIFO */
+	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+		return 0;
+
+	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
+	if (rta) {
+		rta->rta_type = RTM_NEWQDISC;
+		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
+		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
+
+		ret = q->ops->change(q, rta);
+		kfree(rta);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(fifo_set_limit);
+
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+			       unsigned int limit)
+{
+	struct Qdisc *q;
+	int err = -ENOMEM;
+
+	q = qdisc_create_dflt(sch->dev, ops, TC_H_MAKE(sch->handle, 1));
+	if (q) {
+		err = fifo_set_limit(q, limit);
+		if (err < 0) {
+			qdisc_destroy(q);
+			q = NULL;
+		}
+	}
+
+	return q ? : ERR_PTR(err);
+}
+EXPORT_SYMBOL(fifo_create_dflt);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6c344ad..5342a2f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -310,28 +310,6 @@ static void netem_reset(struct Qdisc *sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
-/* Pass size change message down to embedded FIFO */
-static int set_fifo_limit(struct Qdisc *q, int limit)
-{
-	struct rtattr *rta;
-	int ret = -ENOMEM;
-
-	/* Hack to avoid sending change message to non-FIFO */
-	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
-		return 0;
-
-	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
-	if (rta) {
-		rta->rta_type = RTM_NEWQDISC;
-		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
-		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
-
-		ret = q->ops->change(q, rta);
-		kfree(rta);
-	}
-	return ret;
-}
-
 /*
  * Distribution data is a variable size payload containing
  * signed 16 bit values.
@@ -414,7 +392,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 		return -EINVAL;
 
 	qopt = RTA_DATA(opt);
-	ret = set_fifo_limit(q->qdisc, qopt->limit);
+	ret = fifo_set_limit(q->qdisc, qopt->limit);
 	if (ret) {
 		pr_debug("netem: can't set fifo limit\n");
 		return ret;
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index f1e9647..699f83d 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -174,33 +174,6 @@ static void red_destroy(struct Qdisc *sch)
 	qdisc_destroy(q->qdisc);
 }
 
-static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
-{
-	struct Qdisc *q;
-	struct rtattr *rta;
-	int ret;
-
-	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
-			      TC_H_MAKE(sch->handle, 1));
-	if (q) {
-		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)),
-			      GFP_KERNEL);
-		if (rta) {
-			rta->rta_type = RTM_NEWQDISC;
-			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
-			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
-
-			ret = q->ops->change(q, rta);
-			kfree(rta);
-
-			if (ret == 0)
-				return q;
-		}
-		qdisc_destroy(q);
-	}
-	return NULL;
-}
-
 static int red_change(struct Qdisc *sch, struct rtattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
@@ -220,9 +193,9 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
 
 	if (ctl->limit > 0) {
-		child = red_create_dflt(sch, ctl->limit);
-		if (child == NULL)
-			return -ENOMEM;
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+		if (IS_ERR(child))
+			return PTR_ERR(child);
 	}
 
 	sch_tree_lock(sch);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index d88fea9..bd34355 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -242,33 +242,6 @@ static void tbf_reset(struct Qdisc* sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
-static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
-{
-	struct Qdisc *q;
-	struct rtattr *rta;
-	int ret;
-
-	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
-			      TC_H_MAKE(sch->handle, 1));
-	if (q) {
-		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
-		if (rta) {
-			rta->rta_type = RTM_NEWQDISC;
-			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
-			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;
-
-			ret = q->ops->change(q, rta);
-			kfree(rta);
-
-			if (ret == 0)
-				return q;
-		}
-		qdisc_destroy(q);
-	}
-
-	return NULL;
-}
-
 static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 {
 	int err = -EINVAL;
@@ -312,8 +285,9 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 		goto done;
 
 	if (qopt->limit > 0) {
-		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
-			goto done;
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+		if (IS_ERR(child))
+			return PTR_ERR(child);
 	}
 
 	sch_tree_lock(sch);


* [RFC NET_SCHED 02/05]: Rename qdisc helpers for built-in queue
  2008-01-20 18:28 [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 01/05]: Consolidate default fifo setup Patrick McHardy
@ 2008-01-20 18:28 ` Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 03/05]: Introduce child qdisc helpers Patrick McHardy
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:28 UTC (permalink / raw)
  To: netdev; +Cc: Patrick McHardy

commit c1f4198dd24ce854b7d55d0ed23a61d36d7defc9
Author: Patrick McHardy <kaber@trash.net>
Date:   Wed Jan 2 21:35:21 2008 +0100

    [NET_SCHED]: Rename qdisc helpers for built-in queue
    
    Rename all helper functions dealing with the built-in queue of
    struct Qdisc (sch->q) to qdisc_q_... to make the naming more
    consistent and avoid naming clashes with the next patch, which
    introduces a few simple helpers that should logically use those
    names.
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>
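
Illustration only (not part of the patch; the "example" names are made up
and qlen/backlog accounting is omitted): after this rename and the helpers
added in the next patch, the two families read unambiguously at a call site:

struct example_sched_data {
	struct Qdisc	*child;
};

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	if (q->child)				/* classful-style: delegate to child */
		return qdisc_dequeue(q->child);	/* child helper, added in patch 3 */

	return qdisc_q_dequeue_head(sch);	/* this patch: built-in sch->q */
}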

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 60b4b35..3ade673 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -180,8 +180,8 @@ extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto *fl);
 
-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-				       struct sk_buff_head *list)
+static inline int __qdisc_q_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+					 struct sk_buff_head *list)
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += skb->len;
@@ -191,13 +191,13 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_SUCCESS;
 }
 
-static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_q_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 {
-	return __qdisc_enqueue_tail(skb, sch, &sch->q);
+	return __qdisc_q_enqueue_tail(skb, sch, &sch->q);
 }
 
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
-						   struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_q_dequeue_head(struct Qdisc *sch,
+						     struct sk_buff_head *list)
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 
@@ -207,13 +207,13 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 	return skb;
 }
 
-static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+static inline struct sk_buff *qdisc_q_dequeue_head(struct Qdisc *sch)
 {
-	return __qdisc_dequeue_head(sch, &sch->q);
+	return __qdisc_q_dequeue_head(sch, &sch->q);
 }
 
-static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
-						   struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_q_dequeue_tail(struct Qdisc *sch,
+						     struct sk_buff_head *list)
 {
 	struct sk_buff *skb = __skb_dequeue_tail(list);
 
@@ -223,13 +223,13 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
 	return skb;
 }
 
-static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
+static inline struct sk_buff *qdisc_q_dequeue_tail(struct Qdisc *sch)
 {
-	return __qdisc_dequeue_tail(sch, &sch->q);
+	return __qdisc_q_dequeue_tail(sch, &sch->q);
 }
 
-static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
-				  struct sk_buff_head *list)
+static inline int __qdisc_q_requeue(struct sk_buff *skb, struct Qdisc *sch,
+				    struct sk_buff_head *list)
 {
 	__skb_queue_head(list, skb);
 	sch->qstats.backlog += skb->len;
@@ -238,13 +238,13 @@ static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_SUCCESS;
 }
 
-static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_q_requeue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	return __qdisc_requeue(skb, sch, &sch->q);
+	return __qdisc_q_requeue(skb, sch, &sch->q);
 }
 
-static inline void __qdisc_reset_queue(struct Qdisc *sch,
-				       struct sk_buff_head *list)
+static inline void __qdisc_q_reset(struct Qdisc *sch,
+				   struct sk_buff_head *list)
 {
 	/*
 	 * We do not know the backlog in bytes of this list, it
@@ -253,16 +253,16 @@ static inline void __qdisc_reset_queue(struct Qdisc *sch,
 	skb_queue_purge(list);
 }
 
-static inline void qdisc_reset_queue(struct Qdisc *sch)
+static inline void qdisc_q_reset(struct Qdisc *sch)
 {
-	__qdisc_reset_queue(sch, &sch->q);
+	__qdisc_q_reset(sch, &sch->q);
 	sch->qstats.backlog = 0;
 }
 
-static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
-					      struct sk_buff_head *list)
+static inline unsigned int __qdisc_q_drop(struct Qdisc *sch,
+					  struct sk_buff_head *list)
 {
-	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
+	struct sk_buff *skb = __qdisc_q_dequeue_tail(sch, list);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = skb->len;
@@ -273,9 +273,9 @@ static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
 	return 0;
 }
 
-static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
+static inline unsigned int qdisc_q_drop(struct Qdisc *sch)
 {
-	return __qdisc_queue_drop(sch, &sch->q);
+	return __qdisc_q_drop(sch, &sch->q);
 }
 
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index f9bf58b..e4a4dc2 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -28,7 +28,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (likely(sch->qstats.backlog + skb->len <= q->limit))
-		return qdisc_enqueue_tail(skb, sch);
+		return qdisc_q_enqueue_tail(skb, sch);
 
 	return qdisc_reshape_fail(skb, sch);
 }
@@ -38,7 +38,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (likely(skb_queue_len(&sch->q) < q->limit))
-		return qdisc_enqueue_tail(skb, sch);
+		return qdisc_q_enqueue_tail(skb, sch);
 
 	return qdisc_reshape_fail(skb, sch);
 }
@@ -82,11 +82,11 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
 	.id		=	"pfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	pfifo_enqueue,
-	.dequeue	=	qdisc_dequeue_head,
-	.requeue	=	qdisc_requeue,
-	.drop		=	qdisc_queue_drop,
+	.dequeue	=	qdisc_q_dequeue_head,
+	.requeue	=	qdisc_q_requeue,
+	.drop		=	qdisc_q_drop,
 	.init		=	fifo_init,
-	.reset		=	qdisc_reset_queue,
+	.reset		=	qdisc_q_reset,
 	.change		=	fifo_init,
 	.dump		=	fifo_dump,
 	.owner		=	THIS_MODULE,
@@ -96,11 +96,11 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
 	.id		=	"bfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	bfifo_enqueue,
-	.dequeue	=	qdisc_dequeue_head,
-	.requeue	=	qdisc_requeue,
-	.drop		=	qdisc_queue_drop,
+	.dequeue	=	qdisc_q_dequeue_head,
+	.requeue	=	qdisc_q_requeue,
+	.drop		=	qdisc_q_drop,
 	.init		=	fifo_init,
-	.reset		=	qdisc_reset_queue,
+	.reset		=	qdisc_q_reset,
 	.change		=	fifo_init,
 	.dump		=	fifo_dump,
 	.owner		=	THIS_MODULE,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9be2f15..6afd59e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -344,7 +344,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
 		qdisc->q.qlen++;
-		return __qdisc_enqueue_tail(skb, qdisc, list);
+		return __qdisc_q_enqueue_tail(skb, qdisc, list);
 	}
 
 	return qdisc_drop(skb, qdisc);
@@ -358,7 +358,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 		if (!skb_queue_empty(list + prio)) {
 			qdisc->q.qlen--;
-			return __qdisc_dequeue_head(qdisc, list + prio);
+			return __qdisc_q_dequeue_head(qdisc, list + prio);
 		}
 	}
 
@@ -368,7 +368,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
 	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
+	return __qdisc_q_requeue(skb, qdisc, prio2list(skb, qdisc));
 }
 
 static void pfifo_fast_reset(struct Qdisc* qdisc)
@@ -377,7 +377,7 @@ static void pfifo_fast_reset(struct Qdisc* qdisc)
 	struct sk_buff_head *list = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, list + prio);
+		__qdisc_q_reset(qdisc, list + prio);
 
 	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index e2bcd66..d933565 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -165,7 +165,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			 * allows for DP flows to be left untouched.
 			 */
 			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
-				return qdisc_enqueue_tail(skb, sch);
+				return qdisc_q_enqueue_tail(skb, sch);
 			else
 				goto drop;
 		}
@@ -228,7 +228,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (q->backlog + skb->len <= q->limit) {
 		q->backlog += skb->len;
-		return qdisc_enqueue_tail(skb, sch);
+		return qdisc_q_enqueue_tail(skb, sch);
 	}
 
 	q->stats.pdrop++;
@@ -257,7 +257,7 @@ static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		q->backlog += skb->len;
 	}
 
-	return qdisc_requeue(skb, sch);
+	return qdisc_q_requeue(skb, sch);
 }
 
 static struct sk_buff *gred_dequeue(struct Qdisc* sch)
@@ -265,7 +265,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 	struct sk_buff *skb;
 	struct gred_sched *t = qdisc_priv(sch);
 
-	skb = qdisc_dequeue_head(sch);
+	skb = qdisc_q_dequeue_head(sch);
 
 	if (skb) {
 		struct gred_sched_data *q;
@@ -297,7 +297,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 	struct sk_buff *skb;
 	struct gred_sched *t = qdisc_priv(sch);
 
-	skb = qdisc_dequeue_tail(sch);
+	skb = qdisc_q_dequeue_tail(sch);
 	if (skb) {
 		unsigned int len = skb->len;
 		struct gred_sched_data *q;
@@ -332,7 +332,7 @@ static void gred_reset(struct Qdisc* sch)
 	int i;
 	struct gred_sched *t = qdisc_priv(sch);
 
-	qdisc_reset_queue(sch);
+	qdisc_q_reset(sch);
 
 	for (i = 0; i < t->DPs; i++) {
 		struct gred_sched_data *q = t->tab[i];
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5342a2f..3ec4a81 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -470,7 +470,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		/* Optimize for add at tail */
 		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
 			q->oldest = tnext;
-			return qdisc_enqueue_tail(nskb, sch);
+			return qdisc_q_enqueue_tail(nskb, sch);
 		}
 
 		skb_queue_reverse_walk(list, skb) {
@@ -526,11 +526,11 @@ static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
 	.id		=	"tfifo",
 	.priv_size	=	sizeof(struct fifo_sched_data),
 	.enqueue	=	tfifo_enqueue,
-	.dequeue	=	qdisc_dequeue_head,
-	.requeue	=	qdisc_requeue,
-	.drop		=	qdisc_queue_drop,
+	.dequeue	=	qdisc_q_dequeue_head,
+	.requeue	=	qdisc_q_requeue,
+	.drop		=	qdisc_q_drop,
 	.init		=	tfifo_init,
-	.reset		=	qdisc_reset_queue,
+	.reset		=	qdisc_q_reset,
 	.change		=	tfifo_init,
 	.dump		=	tfifo_dump,
 };


* [RFC NET_SCHED 03/05]: Introduce child qdisc helpers
  2008-01-20 18:28 [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 01/05]: Consolidate default fifo setup Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 02/05]: Rename qdisc helpers for built-in queue Patrick McHardy
@ 2008-01-20 18:28 ` Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 04/05]: Use " Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 05/05]: Consolidate class ops for pseudo classful qdisc Patrick McHardy
  4 siblings, 0 replies; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:28 UTC (permalink / raw)
  To: netdev; +Cc: Patrick McHardy

commit a6d1954517202bffb14f5122756891d8c5b8e2e2
Author: Patrick McHardy <kaber@trash.net>
Date:   Wed Jan 16 12:08:18 2008 +0100

    [NET_SCHED]: Introduce child qdisc helpers
    
    Introduce a few helpers to dispatch calls to child qdiscs without
    repeating the qdisc argument every time.
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>
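
This patch only adds the helpers; the call sites are converted in the next
patch. As a sketch of the intended effect (made-up function name, child
qdisc passed in explicitly), a typical dispatch to a child becomes:

static unsigned int example_drop_child(struct Qdisc *sch, struct Qdisc *child)
{
	unsigned int len;

	/* previously open-coded as:
	 * if (child->ops->drop && (len = child->ops->drop(child)) != 0)
	 */
	len = qdisc_drop(child);	/* returns 0 when the child has no ->drop */
	if (len > 0)
		sch->q.qlen--;		/* child dropped one queued packet */
	return len;
}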

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3ade673..decc339 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -180,6 +180,26 @@ extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto *fl);
 
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return sch->enqueue(skb, sch);
+}
+
+static inline struct sk_buff *qdisc_dequeue(struct Qdisc *sch)
+{
+	return sch->dequeue(sch);
+}
+
+static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return sch->ops->requeue(skb, sch);
+}
+
+static inline unsigned int qdisc_drop(struct Qdisc *sch)
+{
+	return sch->ops->drop ? sch->ops->drop(sch) : 0;
+}
+
 static inline int __qdisc_q_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 					 struct sk_buff_head *list)
 {
@@ -278,7 +298,7 @@ static inline unsigned int qdisc_q_drop(struct Qdisc *sch)
 	return __qdisc_q_drop(sch, &sch->q);
 }
 
-static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+static inline int qdisc_drop_skb(struct sk_buff *skb, struct Qdisc *sch)
 {
 	kfree_skb(skb);
 	sch->qstats.drops++;
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index 507fb48..ac374eb 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -19,7 +19,7 @@
 
 static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	qdisc_drop(skb, sch);
+	qdisc_drop_skb(skb, sch);
 	return NET_XMIT_SUCCESS;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6afd59e..8e186e1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -347,7 +347,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 		return __qdisc_q_enqueue_tail(skb, qdisc, list);
 	}
 
-	return qdisc_drop(skb, qdisc);
+	return qdisc_drop_skb(skb, qdisc);
 }
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index d933565..ca65e7c 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -233,10 +233,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	q->stats.pdrop++;
 drop:
-	return qdisc_drop(skb, sch);
+	return qdisc_drop_skb(skb, sch);
 
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop_skb(skb, sch);
 	return NET_XMIT_CN;
 }
 
@@ -316,7 +316,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 				red_start_of_idle_period(&q->parms);
 		}
 
-		qdisc_drop(skb, sch);
+		qdisc_drop_skb(skb, sch);
 		return len;
 	}
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 699f83d..acf06d9 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -104,7 +104,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	return ret;
 
 congestion_drop:
-	qdisc_drop(skb, sch);
+	qdisc_drop_skb(skb, sch);
 	return NET_XMIT_CN;
 }
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c58fa6e..0b46589 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -257,7 +257,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	 * i.e. drop _this_ packet.
 	 */
 	if (q->qs[x].qlen >= q->limit)
-		return qdisc_drop(skb, sch);
+		return qdisc_drop_skb(skb, sch);
 
 	sch->qstats.backlog += skb->len;
 	__skb_queue_tail(&q->qs[x], skb);


* [RFC NET_SCHED 04/05]: Use qdisc helpers
  2008-01-20 18:28 [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation Patrick McHardy
                   ` (2 preceding siblings ...)
  2008-01-20 18:28 ` [RFC NET_SCHED 03/05]: Introduce child qdisc helpers Patrick McHardy
@ 2008-01-20 18:28 ` Patrick McHardy
  2008-01-20 18:28 ` [RFC NET_SCHED 05/05]: Consolidate class ops for pseudo classful qdisc Patrick McHardy
  4 siblings, 0 replies; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:28 UTC (permalink / raw)
  To: netdev; +Cc: Patrick McHardy

commit 8b0737e99efbf5b51f950d9fa95d69f96bf0a926
Author: Patrick McHardy <kaber@trash.net>
Date:   Wed Jan 16 12:22:00 2008 +0100

    [NET_SCHED]: Use qdisc helpers
    
    Use the new qdisc helpers where possible. Also pull return value
    assignments out of conditions and use proper NET_XMIT codes where
    possible.
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>
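
For example (same transformation as the netem/tbf requeue hunks below;
NET_XMIT_SUCCESS is 0, so the old "== 0" checks were equivalent, just less
explicit):

	/* before */
	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	/* after */
	ret = qdisc_requeue(skb, q->qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}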

diff --git a/net/core/dev.c b/net/core/dev.c
index 385b799..663031c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1673,7 +1673,7 @@ gso:
 		if (q->enqueue) {
 			/* reset queue_mapping to zero */
 			skb_set_queue_mapping(skb, 0);
-			rc = q->enqueue(skb, q);
+			rc = qdisc_enqueue(skb, q);
 			qdisc_run(dev);
 			spin_unlock(&dev->queue_lock);
 
@@ -1970,7 +1970,7 @@ static int ing_filter(struct sk_buff *skb)
 
 	spin_lock(&dev->ingress_lock);
 	if ((q = dev->qdisc_ingress) != NULL)
-		result = q->enqueue(skb, q);
+		result = qdisc_enqueue(skb, q);
 	spin_unlock(&dev->ingress_lock);
 
 	return result;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index d870a41..844774d 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -430,7 +430,8 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	}
 
-	if ((ret = flow->q->enqueue(skb, flow->q)) != 0) {
+	ret = qdisc_enqueue(skb, flow->q);
+	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		sch->qstats.drops++;
 		if (flow)
@@ -478,9 +479,9 @@ static void sch_atm_dequeue(unsigned long data)
 		 * If traffic is properly shaped, this won't generate nasty
 		 * little bursts. Otherwise, it may ... (but that's okay)
 		 */
-		while ((skb = flow->q->dequeue(flow->q))) {
+		while ((skb = qdisc_dequeue(flow->q))) {
 			if (!atm_may_send(flow->vcc, skb->truesize)) {
-				(void)flow->q->ops->requeue(skb, flow->q);
+				qdisc_requeue(skb, flow->q);
 				break;
 			}
 			D2PRINTK("atm_tc_dequeue: sending on class %p\n", flow);
@@ -514,7 +515,7 @@ static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
 
 	D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
 	tasklet_schedule(&p->task);
-	skb = p->link.q->dequeue(p->link.q);
+	skb = qdisc_dequeue(p->link.q);
 	if (skb)
 		sch->q.qlen--;
 	return skb;
@@ -526,7 +527,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 
 	D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
-	ret = p->link.q->ops->requeue(skb, p->link.q);
+	ret = qdisc_requeue(skb, p->link.q);
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
@@ -544,9 +545,11 @@ static unsigned int atm_tc_drop(struct Qdisc *sch)
 	unsigned int len;
 
 	DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
-	for (flow = p->flows; flow; flow = flow->next)
-		if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
+	for (flow = p->flows; flow; flow = flow->next) {
+		len = qdisc_drop(flow->q);
+		if (len > 0)
 			return len;
+	}
 	return 0;
 }
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index bea123f..8731f51 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -396,7 +396,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	cl->q->__parent = sch;
 #endif
-	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+	ret = qdisc_enqueue(skb, cl->q);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
 		sch->bstats.bytes+=len;
@@ -432,7 +433,8 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 	cl->q->__parent = sch;
 #endif
-	if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
+	ret = qdisc_requeue(skb, cl->q);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
 		if (!cl->next_alive)
@@ -580,9 +582,8 @@ static void cbq_ovl_lowprio(struct cbq_class *cl)
 
 static void cbq_ovl_drop(struct cbq_class *cl)
 {
-	if (cl->q->ops->drop)
-		if (cl->q->ops->drop(cl->q))
-			cl->qdisc->q.qlen--;
+	if (qdisc_drop(cl->q))
+		cl->qdisc->q.qlen--;
 	cl->xstats.overactions++;
 	cbq_ovl_classic(cl);
 }
@@ -680,7 +681,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (cl->q->enqueue(skb, cl->q) == 0) {
+		if (qdisc_enqueue(skb, cl->q) == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes+=len;
@@ -880,7 +881,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 				goto next_class;
 			}
 
-			skb = cl->q->dequeue(cl->q);
+			skb = qdisc_dequeue(cl->q);
 
 			/* Class did not give us any skb :-(
 			   It could occur even if cl->q->q.qlen != 0
@@ -1226,7 +1227,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
 
 		cl = cl_head;
 		do {
-			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
+			len = qdisc_drop(cl->q);
+			if (len > 0) {
 				sch->q.qlen--;
 				if (!cl->q->q.qlen)
 					cbq_deactivate_class(cl);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index b9fe697..9bdb46e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -257,7 +257,7 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
 		}
 	}
 
-	err = p->q->enqueue(skb,p->q);
+	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return err;
@@ -278,7 +278,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 
 	D2PRINTK("dsmark_dequeue(sch %p,[qdisc %p])\n", sch, p);
 
-	skb = p->q->ops->dequeue(p->q);
+	skb = qdisc_dequeue(p->q);
 	if (skb == NULL)
 		return NULL;
 
@@ -319,7 +319,7 @@ static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
 
 	D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
 
-	err = p->q->ops->requeue(skb, p->q);
+	err = qdisc_requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return err;
@@ -338,10 +338,7 @@ static unsigned int dsmark_drop(struct Qdisc *sch)
 
 	DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
 
-	if (p->q->ops->drop == NULL)
-		return 0;
-
-	len = p->q->ops->drop(p->q);
+	len = qdisc_drop(p->q);
 	if (len)
 		sch->q.qlen--;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8e186e1..483f753 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -76,7 +76,7 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
 	if ((skb = dev->gso_skb))
 		dev->gso_skb = NULL;
 	else
-		skb = q->dequeue(q);
+		skb = qdisc_dequeue(q);
 
 	return skb;
 }
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index ff03327..71d7442 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -892,14 +892,14 @@ qdisc_peek_len(struct Qdisc *sch)
 	struct sk_buff *skb;
 	unsigned int len;
 
-	skb = sch->dequeue(sch);
+	skb = qdisc_dequeue(sch);
 	if (skb == NULL) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
 		return 0;
 	}
 	len = skb->len;
-	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
+	if (unlikely(qdisc_requeue(skb, sch) != NET_XMIT_SUCCESS)) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: failed to requeue\n");
 		qdisc_tree_decrease_qlen(sch, 1);
@@ -1574,7 +1574,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	len = skb->len;
-	err = cl->qdisc->enqueue(skb, cl->qdisc);
+	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		cl->qstats.drops++;
 		sch->qstats.drops++;
@@ -1630,7 +1630,7 @@ hfsc_dequeue(struct Qdisc *sch)
 		}
 	}
 
-	skb = cl->qdisc->dequeue(cl->qdisc);
+	skb = qdisc_dequeue(cl->qdisc);
 	if (skb == NULL) {
 		if (net_ratelimit())
 			printk("HFSC: Non-work-conserving qdisc ?\n");
@@ -1681,8 +1681,8 @@ hfsc_drop(struct Qdisc *sch)
 	unsigned int len;
 
 	list_for_each_entry(cl, &q->droplist, dlist) {
-		if (cl->qdisc->ops->drop != NULL &&
-		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
+		len = qdisc_drop(cl->qdisc);
+		if (len > 0) {
 			if (cl->qdisc->q.qlen == 0) {
 				update_vf(cl, 0, 0);
 				set_passive(cl);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 72beb66..ca3d4a5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -592,7 +592,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
+	} else if (qdisc_enqueue(skb, cl->un.leaf.q) !=
 		   NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		cl->qstats.drops++;
@@ -629,7 +629,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			sch->qstats.drops++;
 			return NET_XMIT_CN;
 		}
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if (qdisc_requeue(skb, cl->un.leaf.q) !=
 		   NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		cl->qstats.drops++;
@@ -849,7 +849,7 @@ next:
 			goto next;
 		}
 
-		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+		skb = qdisc_dequeue(cl->un.leaf.q);
 		if (likely(skb != NULL))
 			break;
 		if (!cl->warned) {
@@ -949,8 +949,9 @@ static unsigned int htb_drop(struct Qdisc *sch)
 			struct htb_class *cl = list_entry(p, struct htb_class,
 							  un.leaf.drop_list);
 			unsigned int len;
-			if (cl->un.leaf.q->ops->drop &&
-			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
+
+			len = qdisc_drop(cl->un.leaf.q);
+			if (len) {
 				sch->q.qlen--;
 				if (!cl->un.leaf.q->q.qlen)
 					htb_deactivate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 3ec4a81..f6c24fd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -184,7 +184,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 		q->duplicate = 0;
 
-		rootq->enqueue(skb2, rootq);
+		qdisc_enqueue(skb2, rootq);
 		q->duplicate = dupsave;
 	}
 
@@ -218,7 +218,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		now = psched_get_time();
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = q->qdisc->enqueue(skb, q->qdisc);
+		ret = qdisc_enqueue(skb, q->qdisc);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -226,7 +226,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		 */
 		cb->time_to_send = psched_get_time();
 		q->counter = 0;
-		ret = q->qdisc->ops->requeue(skb, q->qdisc);
+		ret = qdisc_requeue(skb, q->qdisc);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -246,7 +246,8 @@ static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
+	ret = qdisc_requeue(skb, q->qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
 	}
@@ -259,7 +260,8 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
 
-	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+	len = qdisc_drop(q->qdisc);
+	if (len > 0) {
 		sch->q.qlen--;
 		sch->qstats.drops++;
 	}
@@ -275,7 +277,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	if (sch->flags & TCQ_F_THROTTLED)
 		return NULL;
 
-	skb = q->qdisc->dequeue(q->qdisc);
+	skb = qdisc_dequeue(q->qdisc);
 	if (skb) {
 		const struct netem_skb_cb *cb
 			= (const struct netem_skb_cb *)skb->cb;
@@ -288,7 +290,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			return skb;
 		}
 
-		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
+		if (unlikely(qdisc_requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
 			qdisc_tree_decrease_qlen(q->qdisc, 1);
 			sch->qstats.drops++;
 			printk(KERN_ERR "netem: %s could not requeue\n",
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2243aaa..800accc 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -86,7 +86,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 #endif
 
-	if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) {
+	ret = qdisc_enqueue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->bstats.bytes += skb->len;
 		sch->bstats.packets++;
 		sch->q.qlen++;
@@ -113,7 +114,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 #endif
 
-	if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
+	ret = qdisc_requeue(skb, qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
 		return 0;
@@ -138,7 +140,7 @@ prio_dequeue(struct Qdisc* sch)
 		 */
 		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
 			qdisc = q->queues[prio];
-			skb = qdisc->dequeue(qdisc);
+			skb = qdisc_dequeue(qdisc);
 			if (skb) {
 				sch->q.qlen--;
 				return skb;
@@ -168,7 +170,7 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch)
 		if (!__netif_subqueue_stopped(sch->dev,
 					    (q->mq ? q->curband : 0))) {
 			qdisc = q->queues[q->curband];
-			skb = qdisc->dequeue(qdisc);
+			skb = qdisc_dequeue(qdisc);
 			if (skb) {
 				sch->q.qlen--;
 				q->curband++;
@@ -193,7 +195,8 @@ static unsigned int prio_drop(struct Qdisc* sch)
 
 	for (prio = q->bands-1; prio >= 0; prio--) {
 		qdisc = q->queues[prio];
-		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+		len = qdisc_drop(qdisc);
+		if (len > 0) {
 			sch->q.qlen--;
 			return len;
 		}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index acf06d9..076f1ef 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -92,7 +92,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			break;
 	}
 
-	ret = child->enqueue(skb, child);
+	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->bstats.bytes += skb->len;
 		sch->bstats.packets++;
@@ -117,7 +117,7 @@ static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	if (red_is_idling(&q->parms))
 		red_end_of_idle_period(&q->parms);
 
-	ret = child->ops->requeue(skb, child);
+	ret = qdisc_requeue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->qstats.requeues++;
 		sch->q.qlen++;
@@ -131,7 +131,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
 
-	skb = child->dequeue(child);
+	skb = qdisc_dequeue(child);
 	if (skb)
 		sch->q.qlen--;
 	else if (!red_is_idling(&q->parms))
@@ -146,7 +146,8 @@ static unsigned int red_drop(struct Qdisc* sch)
 	struct Qdisc *child = q->qdisc;
 	unsigned int len;
 
-	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
+	len = qdisc_drop(child);
+	if (len > 0) {
 		q->stats.other++;
 		sch->qstats.drops++;
 		sch->q.qlen--;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index bd34355..5fd4dff 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -133,7 +133,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return NET_XMIT_DROP;
 	}
 
-	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
+	ret = qdisc_enqueue(skb, q->qdisc);
+	if (ret != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return ret;
 	}
@@ -149,7 +150,8 @@ static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
+	ret = qdisc_requeue(skb, q->qdisc);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
 	}
@@ -162,7 +164,8 @@ static unsigned int tbf_drop(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
 
-	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+	len = qdisc_drop(q->qdisc);
+	if (len > 0) {
 		sch->q.qlen--;
 		sch->qstats.drops++;
 	}
@@ -174,7 +177,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = q->qdisc->dequeue(q->qdisc);
+	skb = qdisc_dequeue(q->qdisc);
 
 	if (skb) {
 		psched_time_t now;


* [RFC NET_SCHED 05/05]: Consolidate class ops for pseudo classful qdisc
  2008-01-20 18:28 [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation Patrick McHardy
                   ` (3 preceding siblings ...)
  2008-01-20 18:28 ` [RFC NET_SCHED 04/05]: Use " Patrick McHardy
@ 2008-01-20 18:28 ` Patrick McHardy
  2008-01-20 18:32   ` Patrick McHardy
  4 siblings, 1 reply; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:28 UTC (permalink / raw)
  To: netdev; +Cc: Patrick McHardy

commit e97ba18f7a8f9342fa06d0f5606a186b18e1d7f8
Author: Patrick McHardy <kaber@trash.net>
Date:   Wed Jan 16 12:22:06 2008 +0100

    [NET_SCHED]: Consolidate class ops for pseudo classful qdisc
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index decc339..ca6e4de 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -351,4 +351,10 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
 }
 #endif
 
+struct pc_sched_data {
+	struct Qdisc	*qdisc;
+};
+
+extern const struct Qdisc_class_ops pseudo_classful_ops;
+
 #endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f5ab54b..63882c5 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -43,6 +43,9 @@ if NET_SCHED
 
 comment "Queueing/Scheduling"
 
+config NET_SCH_PC
+	tristate
+
 config NET_SCH_CBQ
 	tristate "Class Based Queueing (CBQ)"
 	---help---
@@ -141,6 +144,7 @@ config NET_SCH_SFQ
 
 config NET_SCH_TEQL
 	tristate "True Link Equalizer (TEQL)"
+	select NET_SCH_PC
 	---help---
 	  Say Y here if you want to use the True Link Equalizer (TLE) packet
 	  scheduling algorithm. This queueing discipline allows the combination
@@ -153,6 +157,7 @@ config NET_SCH_TEQL
 
 config NET_SCH_TBF
 	tristate "Token Bucket Filter (TBF)"
+	select NET_SCH_PC
 	---help---
 	  Say Y here if you want to use the Token Bucket Filter (TBF) packet
 	  scheduling algorithm.
@@ -186,6 +191,7 @@ config NET_SCH_DSMARK
 
 config NET_SCH_NETEM
 	tristate "Network emulator (NETEM)"
+	select NET_SCH_PC
 	---help---
 	  Say Y if you want to emulate network delay, loss, and packet
 	  re-ordering. This is often useful to simulate networks when
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 81ecbe8..593bb3a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
 obj-$(CONFIG_NET_ACT_NAT)	+= act_nat.o
 obj-$(CONFIG_NET_ACT_PEDIT)	+= act_pedit.o
 obj-$(CONFIG_NET_ACT_SIMP)	+= act_simple.o
+obj-$(CONFIG_NET_SCH_PC)	+= sch_pseudo_classful.o
 obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f6c24fd..2444a97 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -52,7 +52,8 @@
 */
 
 struct netem_sched_data {
-	struct Qdisc	*qdisc;
+	struct pc_sched_data class;
+
 	struct qdisc_watchdog watchdog;
 
 	psched_tdiff_t latency;
@@ -218,7 +219,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		now = psched_get_time();
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = qdisc_enqueue(skb, q->qdisc);
+		ret = qdisc_enqueue(skb, q->class.qdisc);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -226,7 +227,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		 */
 		cb->time_to_send = psched_get_time();
 		q->counter = 0;
-		ret = qdisc_requeue(skb, q->qdisc);
+		ret = qdisc_requeue(skb, q->class.qdisc);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -246,7 +247,7 @@ static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	ret = qdisc_requeue(skb, q->qdisc);
+	ret = qdisc_requeue(skb, q->class.qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
@@ -260,7 +261,7 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
 
-	len = qdisc_drop(q->qdisc);
+	len = qdisc_drop(q->class.qdisc);
 	if (len > 0) {
 		sch->q.qlen--;
 		sch->qstats.drops++;
@@ -277,7 +278,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	if (sch->flags & TCQ_F_THROTTLED)
 		return NULL;
 
-	skb = qdisc_dequeue(q->qdisc);
+	skb = qdisc_dequeue(q->class.qdisc);
 	if (skb) {
 		const struct netem_skb_cb *cb
 			= (const struct netem_skb_cb *)skb->cb;
@@ -290,11 +291,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			return skb;
 		}
 
-		if (unlikely(qdisc_requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
-			qdisc_tree_decrease_qlen(q->qdisc, 1);
+		if (unlikely(qdisc_requeue(skb, q->class.qdisc) != NET_XMIT_SUCCESS)) {
+			qdisc_tree_decrease_qlen(q->class.qdisc, 1);
 			sch->qstats.drops++;
 			printk(KERN_ERR "netem: %s could not requeue\n",
-			       q->qdisc->ops->id);
+			       q->class.qdisc->ops->id);
 		}
 
 		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
@@ -307,7 +308,7 @@ static void netem_reset(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
+	qdisc_reset(q->class.qdisc);
 	sch->q.qlen = 0;
 	qdisc_watchdog_cancel(&q->watchdog);
 }
@@ -394,7 +395,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 		return -EINVAL;
 
 	qopt = RTA_DATA(opt);
-	ret = fifo_set_limit(q->qdisc, qopt->limit);
+	ret = fifo_set_limit(q->class.qdisc, qopt->limit);
 	if (ret) {
 		pr_debug("netem: can't set fifo limit\n");
 		return ret;
@@ -547,9 +548,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
-				     TC_H_MAKE(sch->handle, 1));
-	if (!q->qdisc) {
+	q->class.qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
+					   TC_H_MAKE(sch->handle, 1));
+	if (!q->class.qdisc) {
 		pr_debug("netem: qdisc create failed\n");
 		return -ENOMEM;
 	}
@@ -557,7 +558,7 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	ret = netem_change(sch, opt);
 	if (ret) {
 		pr_debug("netem: change failed\n");
-		qdisc_destroy(q->qdisc);
+		qdisc_destroy(q->class.qdisc);
 	}
 	return ret;
 }
@@ -567,7 +568,7 @@ static void netem_destroy(struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 
 	qdisc_watchdog_cancel(&q->watchdog);
-	qdisc_destroy(q->qdisc);
+	qdisc_destroy(q->class.qdisc);
 	kfree(q->delay_dist);
 }
 
@@ -611,95 +612,9 @@ rtattr_failure:
 	return -1;
 }
 
-static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
-			  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-
-	if (cl != 1) 	/* only one class */
-		return -ENOENT;
-
-	tcm->tcm_handle |= TC_H_MIN(1);
-	tcm->tcm_info = q->qdisc->handle;
-
-	return 0;
-}
-
-static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-
-	if (new == NULL)
-		new = &noop_qdisc;
-
-	sch_tree_lock(sch);
-	*old = xchg(&q->qdisc, new);
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
-	return 0;
-}
-
-static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	return q->qdisc;
-}
-
-static unsigned long netem_get(struct Qdisc *sch, u32 classid)
-{
-	return 1;
-}
-
-static void netem_put(struct Qdisc *sch, unsigned long arg)
-{
-}
-
-static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct rtattr **tca, unsigned long *arg)
-{
-	return -ENOSYS;
-}
-
-static int netem_delete(struct Qdisc *sch, unsigned long arg)
-{
-	return -ENOSYS;
-}
-
-static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	if (!walker->stop) {
-		if (walker->count >= walker->skip)
-			if (walker->fn(sch, 1, walker) < 0) {
-				walker->stop = 1;
-				return;
-			}
-		walker->count++;
-	}
-}
-
-static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
-	return NULL;
-}
-
-static const struct Qdisc_class_ops netem_class_ops = {
-	.graft		=	netem_graft,
-	.leaf		=	netem_leaf,
-	.get		=	netem_get,
-	.put		=	netem_put,
-	.change		=	netem_change_class,
-	.delete		=	netem_delete,
-	.walk		=	netem_walk,
-	.tcf_chain	=	netem_find_tcf,
-	.dump		=	netem_dump_class,
-};
-
 static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 	.id		=	"netem",
-	.cl_ops		=	&netem_class_ops,
+	.cl_ops		=	&pseudo_classful_ops,
 	.priv_size	=	sizeof(struct netem_sched_data),
 	.enqueue	=	netem_enqueue,
 	.dequeue	=	netem_dequeue,
diff --git a/net/sched/sch_pseudo_classful.c b/net/sched/sch_pseudo_classful.c
new file mode 100644
index 0000000..5b9fba5
--- /dev/null
+++ b/net/sched/sch_pseudo_classful.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2007 Patrick McHardy, <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+
+static int sch_pc_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+			struct Qdisc **old)
+{
+	struct pc_sched_data *q = qdisc_priv(sch);
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	sch_tree_lock(sch);
+	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static int sch_pc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			       struct rtattr **tca, unsigned long *arg)
+{
+	return -ENOSYS;
+}
+
+static struct Qdisc *sch_pc_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	return ((struct pc_sched_data *)qdisc_priv(sch))->qdisc;
+}
+
+static unsigned long sch_pc_get(struct Qdisc *sch, u32 classid)
+{
+	return 1;
+}
+
+static void sch_pc_put(struct Qdisc *sch, unsigned long arg)
+{
+	return;
+}
+
+static int sch_pc_delete(struct Qdisc *sch, unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+static struct tcf_proto **sch_pc_tcf_chain(struct Qdisc *sch, unsigned long cl)
+{
+	return NULL;
+}
+
+static int sch_pc_dump_class(struct Qdisc *sch, unsigned long cl,
+			     struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct pc_sched_data *q = qdisc_priv(sch);
+
+	if (cl != 1)
+		return -ENOENT;
+
+	tcm->tcm_handle |= TC_H_MIN(1);
+	tcm->tcm_info = q->qdisc->handle;
+	return 0;
+}
+
+static void sch_pc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+	if (!walker->stop) {
+		if (walker->count >= walker->skip)
+			if (walker->fn(sch, 1, walker) < 0) {
+				walker->stop = 1;
+				return;
+			}
+		walker->count++;
+	}
+}
+
+const struct Qdisc_class_ops pseudo_classful_ops = {
+	.graft		= sch_pc_graft,
+	.leaf		= sch_pc_leaf,
+	.get		= sch_pc_get,
+	.put		= sch_pc_put,
+	.change		= sch_pc_change_class,
+	.delete		= sch_pc_delete,
+	.walk		= sch_pc_walk,
+	.tcf_chain	= sch_pc_tcf_chain,
+	.dump		= sch_pc_dump_class,
+};
+EXPORT_SYMBOL(pseudo_classful_ops);
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 076f1ef..fc61675 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -38,11 +38,12 @@
 
 struct red_sched_data
 {
+	struct pc_sched_data	class;
+
 	u32			limit;		/* HARD maximal queue length */
 	unsigned char		flags;
 	struct red_parms	parms;
 	struct red_stats	stats;
-	struct Qdisc		*qdisc;
 };
 
 static inline int red_use_ecn(struct red_sched_data *q)
@@ -58,7 +59,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
 static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 	int ret;
 
 	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
@@ -111,7 +112,7 @@ congestion_drop:
 static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 	int ret;
 
 	if (red_is_idling(&q->parms))
@@ -129,7 +130,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 
 	skb = qdisc_dequeue(child);
 	if (skb)
@@ -143,7 +144,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
 static unsigned int red_drop(struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 	unsigned int len;
 
 	len = qdisc_drop(child);
@@ -164,7 +165,7 @@ static void red_reset(struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
+	qdisc_reset(q->class.qdisc);
 	sch->q.qlen = 0;
 	red_restart(&q->parms);
 }
@@ -172,7 +173,7 @@ static void red_reset(struct Qdisc* sch)
 static void red_destroy(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	qdisc_destroy(q->qdisc);
+	qdisc_destroy(q->class.qdisc);
 }
 
 static int red_change(struct Qdisc *sch, struct rtattr *opt)
@@ -203,8 +204,9 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-		qdisc_destroy(xchg(&q->qdisc, child));
+		qdisc_tree_decrease_qlen(q->class.qdisc,
+					 q->class.qdisc->q.qlen);
+		qdisc_destroy(xchg(&q->class.qdisc, child));
 	}
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
@@ -222,7 +224,7 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	q->qdisc = &noop_qdisc;
+	q->class.qdisc = &noop_qdisc;
 	return red_change(sch, opt);
 }
 
@@ -261,94 +263,10 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
 
-static int red_dump_class(struct Qdisc *sch, unsigned long cl,
-			  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-
-	if (cl != 1)
-		return -ENOENT;
-	tcm->tcm_handle |= TC_H_MIN(1);
-	tcm->tcm_info = q->qdisc->handle;
-	return 0;
-}
-
-static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-
-	if (new == NULL)
-		new = &noop_qdisc;
-
-	sch_tree_lock(sch);
-	*old = xchg(&q->qdisc, new);
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-	return 0;
-}
-
-static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-	return q->qdisc;
-}
-
-static unsigned long red_get(struct Qdisc *sch, u32 classid)
-{
-	return 1;
-}
-
-static void red_put(struct Qdisc *sch, unsigned long arg)
-{
-	return;
-}
-
-static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct rtattr **tca, unsigned long *arg)
-{
-	return -ENOSYS;
-}
-
-static int red_delete(struct Qdisc *sch, unsigned long cl)
-{
-	return -ENOSYS;
-}
-
-static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	if (!walker->stop) {
-		if (walker->count >= walker->skip)
-			if (walker->fn(sch, 1, walker) < 0) {
-				walker->stop = 1;
-				return;
-			}
-		walker->count++;
-	}
-}
-
-static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
-	return NULL;
-}
-
-static const struct Qdisc_class_ops red_class_ops = {
-	.graft		=	red_graft,
-	.leaf		=	red_leaf,
-	.get		=	red_get,
-	.put		=	red_put,
-	.change		=	red_change_class,
-	.delete		=	red_delete,
-	.walk		=	red_walk,
-	.tcf_chain	=	red_find_tcf,
-	.dump		=	red_dump_class,
-};
-
 static struct Qdisc_ops red_qdisc_ops __read_mostly = {
 	.id		=	"red",
 	.priv_size	=	sizeof(struct red_sched_data),
-	.cl_ops		=	&red_class_ops,
+	.cl_ops		=	&pseudo_classful_ops,
 	.enqueue	=	red_enqueue,
 	.dequeue	=	red_dequeue,
 	.requeue	=	red_requeue,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 5fd4dff..6590ce3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -99,6 +99,8 @@
 
 struct tbf_sched_data
 {
+	struct pc_sched_data	class;
+
 /* Parameters */
 	u32		limit;		/* Maximal length of backlog: bytes */
 	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -111,7 +113,6 @@ struct tbf_sched_data
 	long	tokens;			/* Current number of B tokens */
 	long	ptokens;		/* Current number of P tokens */
 	psched_time_t	t_c;		/* Time check-point */
-	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
 	struct qdisc_watchdog watchdog;	/* Watchdog timer */
 };
 
@@ -133,7 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return NET_XMIT_DROP;
 	}
 
-	ret = qdisc_enqueue(skb, q->qdisc);
+	ret = qdisc_enqueue(skb, q->class.qdisc);
 	if (ret != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return ret;
@@ -150,7 +151,7 @@ static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	ret = qdisc_requeue(skb, q->qdisc);
+	ret = qdisc_requeue(skb, q->class.qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
@@ -164,7 +165,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
 
-	len = qdisc_drop(q->qdisc);
+	len = qdisc_drop(q->class.qdisc);
 	if (len > 0) {
 		sch->q.qlen--;
 		sch->qstats.drops++;
@@ -177,7 +178,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = qdisc_dequeue(q->qdisc);
+	skb = qdisc_dequeue(q->class.qdisc);
 
 	if (skb) {
 		psched_time_t now;
@@ -222,9 +223,9 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 		   (cf. CSZ, HPFQ, HFSC)
 		 */
 
-		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
+		if (q->class.qdisc->ops->requeue(skb, q->class.qdisc) != NET_XMIT_SUCCESS) {
 			/* When requeue fails skb is dropped */
-			qdisc_tree_decrease_qlen(q->qdisc, 1);
+			qdisc_tree_decrease_qlen(q->class.qdisc, 1);
 			sch->qstats.drops++;
 		}
 
@@ -237,7 +238,7 @@ static void tbf_reset(struct Qdisc* sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
+	qdisc_reset(q->class.qdisc);
 	sch->q.qlen = 0;
 	q->t_c = psched_get_time();
 	q->tokens = q->buffer;
@@ -295,8 +296,8 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 
 	sch_tree_lock(sch);
 	if (child) {
-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-		qdisc_destroy(xchg(&q->qdisc, child));
+		qdisc_tree_decrease_qlen(q->class.qdisc, q->class.qdisc->q.qlen);
+		qdisc_destroy(xchg(&q->class.qdisc, child));
 	}
 	q->limit = qopt->limit;
 	q->mtu = qopt->mtu;
@@ -325,7 +326,7 @@ static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
 
 	q->t_c = psched_get_time();
 	qdisc_watchdog_init(&q->watchdog, sch);
-	q->qdisc = &noop_qdisc;
+	q->class.qdisc = &noop_qdisc;
 
 	return tbf_change(sch, opt);
 }
@@ -341,7 +342,7 @@ static void tbf_destroy(struct Qdisc *sch)
 	if (q->R_tab)
 		qdisc_put_rtab(q->R_tab);
 
-	qdisc_destroy(q->qdisc);
+	qdisc_destroy(q->class.qdisc);
 }
 
 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -372,96 +373,8 @@ rtattr_failure:
 	return -1;
 }
 
-static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
-			  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-
-	if (cl != 1) 	/* only one class */
-		return -ENOENT;
-
-	tcm->tcm_handle |= TC_H_MIN(1);
-	tcm->tcm_info = q->qdisc->handle;
-
-	return 0;
-}
-
-static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-
-	if (new == NULL)
-		new = &noop_qdisc;
-
-	sch_tree_lock(sch);
-	*old = xchg(&q->qdisc, new);
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
-	return 0;
-}
-
-static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-	return q->qdisc;
-}
-
-static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
-{
-	return 1;
-}
-
-static void tbf_put(struct Qdisc *sch, unsigned long arg)
-{
-}
-
-static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct rtattr **tca, unsigned long *arg)
-{
-	return -ENOSYS;
-}
-
-static int tbf_delete(struct Qdisc *sch, unsigned long arg)
-{
-	return -ENOSYS;
-}
-
-static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	if (!walker->stop) {
-		if (walker->count >= walker->skip)
-			if (walker->fn(sch, 1, walker) < 0) {
-				walker->stop = 1;
-				return;
-			}
-		walker->count++;
-	}
-}
-
-static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
-	return NULL;
-}
-
-static const struct Qdisc_class_ops tbf_class_ops =
-{
-	.graft		=	tbf_graft,
-	.leaf		=	tbf_leaf,
-	.get		=	tbf_get,
-	.put		=	tbf_put,
-	.change		=	tbf_change_class,
-	.delete		=	tbf_delete,
-	.walk		=	tbf_walk,
-	.tcf_chain	=	tbf_find_tcf,
-	.dump		=	tbf_dump_class,
-};
-
 static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
-	.next		=	NULL,
-	.cl_ops		=	&tbf_class_ops,
+	.cl_ops		=	&pseudo_classful_ops,
 	.id		=	"tbf",
 	.priv_size	=	sizeof(struct tbf_sched_data),
 	.enqueue	=	tbf_enqueue,

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [RFC NET_SCHED 05/05]: Consolidate class ops for pseudo classful qdisc
  2008-01-20 18:28 ` [RFC NET_SCHED 05/05]: Consolidate class ops for pseudo classful qdisc Patrick McHardy
@ 2008-01-20 18:32   ` Patrick McHardy
  0 siblings, 0 replies; 7+ messages in thread
From: Patrick McHardy @ 2008-01-20 18:32 UTC (permalink / raw)
  To: netdev

[-- Attachment #1: Type: text/plain, Size: 440 bytes --]

Patrick McHardy wrote:
> commit e97ba18f7a8f9342fa06d0f5606a186b18e1d7f8
> Author: Patrick McHardy <kaber@trash.net>
> Date:   Wed Jan 16 12:22:06 2008 +0100
> 
>     [NET_SCHED]: Consolidate class ops for pseudo classful qdisc
>     
>     Signed-off-by: Patrick McHardy <kaber@trash.net>
> 

>  config NET_SCH_TEQL
>  	tristate "True Link Equalizer (TEQL)"
> +	select NET_SCH_PC


Oops .. this should have been at the NET_SCH_RED entry.


[-- Attachment #2: 05.diff --]
[-- Type: text/x-patch, Size: 21118 bytes --]

commit d89898d2e77b5c1753d21174870277d235994afa
Author: Patrick McHardy <kaber@trash.net>
Date:   Sun Jan 20 19:31:05 2008 +0100

    [NET_SCHED]: Consolidate class ops for pseudo classful qdisc
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index decc339..ca6e4de 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -351,4 +351,10 @@ static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
 }
 #endif
 
+struct pc_sched_data {
+	struct Qdisc	*qdisc;
+};
+
+extern const struct Qdisc_class_ops pseudo_classful_ops;
+
 #endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f5ab54b..b6b4260 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -43,6 +43,9 @@ if NET_SCHED
 
 comment "Queueing/Scheduling"
 
+config NET_SCH_PC
+	tristate
+
 config NET_SCH_CBQ
 	tristate "Class Based Queueing (CBQ)"
 	---help---
@@ -119,6 +122,7 @@ config NET_SCH_RR
 
 config NET_SCH_RED
 	tristate "Random Early Detection (RED)"
+	select NET_SCH_PC
 	---help---
 	  Say Y here if you want to use the Random Early Detection (RED)
 	  packet scheduling algorithm.
@@ -153,6 +157,7 @@ config NET_SCH_TEQL
 
 config NET_SCH_TBF
 	tristate "Token Bucket Filter (TBF)"
+	select NET_SCH_PC
 	---help---
 	  Say Y here if you want to use the Token Bucket Filter (TBF) packet
 	  scheduling algorithm.
@@ -186,6 +191,7 @@ config NET_SCH_DSMARK
 
 config NET_SCH_NETEM
 	tristate "Network emulator (NETEM)"
+	select NET_SCH_PC
 	---help---
 	  Say Y if you want to emulate network delay, loss, and packet
 	  re-ordering. This is often useful to simulate networks when
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 81ecbe8..593bb3a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_IPT)	+= act_ipt.o
 obj-$(CONFIG_NET_ACT_NAT)	+= act_nat.o
 obj-$(CONFIG_NET_ACT_PEDIT)	+= act_pedit.o
 obj-$(CONFIG_NET_ACT_SIMP)	+= act_simple.o
+obj-$(CONFIG_NET_SCH_PC)	+= sch_pseudo_classful.o
 obj-$(CONFIG_NET_SCH_FIFO)	+= sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)	+= sch_cbq.o
 obj-$(CONFIG_NET_SCH_HTB)	+= sch_htb.o
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f6c24fd..2444a97 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -52,7 +52,8 @@
 */
 
 struct netem_sched_data {
-	struct Qdisc	*qdisc;
+	struct pc_sched_data class;
+
 	struct qdisc_watchdog watchdog;
 
 	psched_tdiff_t latency;
@@ -218,7 +219,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		now = psched_get_time();
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = qdisc_enqueue(skb, q->qdisc);
+		ret = qdisc_enqueue(skb, q->class.qdisc);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -226,7 +227,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		 */
 		cb->time_to_send = psched_get_time();
 		q->counter = 0;
-		ret = qdisc_requeue(skb, q->qdisc);
+		ret = qdisc_requeue(skb, q->class.qdisc);
 	}
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -246,7 +247,7 @@ static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	ret = qdisc_requeue(skb, q->qdisc);
+	ret = qdisc_requeue(skb, q->class.qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
@@ -260,7 +261,7 @@ static unsigned int netem_drop(struct Qdisc* sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
 
-	len = qdisc_drop(q->qdisc);
+	len = qdisc_drop(q->class.qdisc);
 	if (len > 0) {
 		sch->q.qlen--;
 		sch->qstats.drops++;
@@ -277,7 +278,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	if (sch->flags & TCQ_F_THROTTLED)
 		return NULL;
 
-	skb = qdisc_dequeue(q->qdisc);
+	skb = qdisc_dequeue(q->class.qdisc);
 	if (skb) {
 		const struct netem_skb_cb *cb
 			= (const struct netem_skb_cb *)skb->cb;
@@ -290,11 +291,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			return skb;
 		}
 
-		if (unlikely(qdisc_requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
-			qdisc_tree_decrease_qlen(q->qdisc, 1);
+		if (unlikely(qdisc_requeue(skb, q->class.qdisc) != NET_XMIT_SUCCESS)) {
+			qdisc_tree_decrease_qlen(q->class.qdisc, 1);
 			sch->qstats.drops++;
 			printk(KERN_ERR "netem: %s could not requeue\n",
-			       q->qdisc->ops->id);
+			       q->class.qdisc->ops->id);
 		}
 
 		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
@@ -307,7 +308,7 @@ static void netem_reset(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
+	qdisc_reset(q->class.qdisc);
 	sch->q.qlen = 0;
 	qdisc_watchdog_cancel(&q->watchdog);
 }
@@ -394,7 +395,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
 		return -EINVAL;
 
 	qopt = RTA_DATA(opt);
-	ret = fifo_set_limit(q->qdisc, qopt->limit);
+	ret = fifo_set_limit(q->class.qdisc, qopt->limit);
 	if (ret) {
 		pr_debug("netem: can't set fifo limit\n");
 		return ret;
@@ -547,9 +548,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
-				     TC_H_MAKE(sch->handle, 1));
-	if (!q->qdisc) {
+	q->class.qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
+					   TC_H_MAKE(sch->handle, 1));
+	if (!q->class.qdisc) {
 		pr_debug("netem: qdisc create failed\n");
 		return -ENOMEM;
 	}
@@ -557,7 +558,7 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
 	ret = netem_change(sch, opt);
 	if (ret) {
 		pr_debug("netem: change failed\n");
-		qdisc_destroy(q->qdisc);
+		qdisc_destroy(q->class.qdisc);
 	}
 	return ret;
 }
@@ -567,7 +568,7 @@ static void netem_destroy(struct Qdisc *sch)
 	struct netem_sched_data *q = qdisc_priv(sch);
 
 	qdisc_watchdog_cancel(&q->watchdog);
-	qdisc_destroy(q->qdisc);
+	qdisc_destroy(q->class.qdisc);
 	kfree(q->delay_dist);
 }
 
@@ -611,95 +612,9 @@ rtattr_failure:
 	return -1;
 }
 
-static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
-			  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-
-	if (cl != 1) 	/* only one class */
-		return -ENOENT;
-
-	tcm->tcm_handle |= TC_H_MIN(1);
-	tcm->tcm_info = q->qdisc->handle;
-
-	return 0;
-}
-
-static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-
-	if (new == NULL)
-		new = &noop_qdisc;
-
-	sch_tree_lock(sch);
-	*old = xchg(&q->qdisc, new);
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
-	return 0;
-}
-
-static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	return q->qdisc;
-}
-
-static unsigned long netem_get(struct Qdisc *sch, u32 classid)
-{
-	return 1;
-}
-
-static void netem_put(struct Qdisc *sch, unsigned long arg)
-{
-}
-
-static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct rtattr **tca, unsigned long *arg)
-{
-	return -ENOSYS;
-}
-
-static int netem_delete(struct Qdisc *sch, unsigned long arg)
-{
-	return -ENOSYS;
-}
-
-static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	if (!walker->stop) {
-		if (walker->count >= walker->skip)
-			if (walker->fn(sch, 1, walker) < 0) {
-				walker->stop = 1;
-				return;
-			}
-		walker->count++;
-	}
-}
-
-static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
-	return NULL;
-}
-
-static const struct Qdisc_class_ops netem_class_ops = {
-	.graft		=	netem_graft,
-	.leaf		=	netem_leaf,
-	.get		=	netem_get,
-	.put		=	netem_put,
-	.change		=	netem_change_class,
-	.delete		=	netem_delete,
-	.walk		=	netem_walk,
-	.tcf_chain	=	netem_find_tcf,
-	.dump		=	netem_dump_class,
-};
-
 static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 	.id		=	"netem",
-	.cl_ops		=	&netem_class_ops,
+	.cl_ops		=	&pseudo_classful_ops,
 	.priv_size	=	sizeof(struct netem_sched_data),
 	.enqueue	=	netem_enqueue,
 	.dequeue	=	netem_dequeue,
diff --git a/net/sched/sch_pseudo_classful.c b/net/sched/sch_pseudo_classful.c
new file mode 100644
index 0000000..5b9fba5
--- /dev/null
+++ b/net/sched/sch_pseudo_classful.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2007 Patrick McHardy, <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+
+static int sch_pc_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+			struct Qdisc **old)
+{
+	struct pc_sched_data *q = qdisc_priv(sch);
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	sch_tree_lock(sch);
+	*old = xchg(&q->qdisc, new);
+	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+	qdisc_reset(*old);
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static int sch_pc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			       struct rtattr **tca, unsigned long *arg)
+{
+	return -ENOSYS;
+}
+
+static struct Qdisc *sch_pc_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	return ((struct pc_sched_data *)qdisc_priv(sch))->qdisc;
+}
+
+static unsigned long sch_pc_get(struct Qdisc *sch, u32 classid)
+{
+	return 1;
+}
+
+static void sch_pc_put(struct Qdisc *sch, unsigned long arg)
+{
+	return;
+}
+
+static int sch_pc_delete(struct Qdisc *sch, unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+static struct tcf_proto **sch_pc_tcf_chain(struct Qdisc *sch, unsigned long cl)
+{
+	return NULL;
+}
+
+static int sch_pc_dump_class(struct Qdisc *sch, unsigned long cl,
+			     struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct pc_sched_data *q = qdisc_priv(sch);
+
+	if (cl != 1)
+		return -ENOENT;
+
+	tcm->tcm_handle |= TC_H_MIN(1);
+	tcm->tcm_info = q->qdisc->handle;
+	return 0;
+}
+
+static void sch_pc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+	if (!walker->stop) {
+		if (walker->count >= walker->skip)
+			if (walker->fn(sch, 1, walker) < 0) {
+				walker->stop = 1;
+				return;
+			}
+		walker->count++;
+	}
+}
+
+const struct Qdisc_class_ops pseudo_classful_ops = {
+	.graft		= sch_pc_graft,
+	.leaf		= sch_pc_leaf,
+	.get		= sch_pc_get,
+	.put		= sch_pc_put,
+	.change		= sch_pc_change_class,
+	.delete		= sch_pc_delete,
+	.walk		= sch_pc_walk,
+	.tcf_chain	= sch_pc_tcf_chain,
+	.dump		= sch_pc_dump_class,
+};
+EXPORT_SYMBOL(pseudo_classful_ops);
+
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 076f1ef..fc61675 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -38,11 +38,12 @@
 
 struct red_sched_data
 {
+	struct pc_sched_data	class;
+
 	u32			limit;		/* HARD maximal queue length */
 	unsigned char		flags;
 	struct red_parms	parms;
 	struct red_stats	stats;
-	struct Qdisc		*qdisc;
 };
 
 static inline int red_use_ecn(struct red_sched_data *q)
@@ -58,7 +59,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
 static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 	int ret;
 
 	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
@@ -111,7 +112,7 @@ congestion_drop:
 static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 	int ret;
 
 	if (red_is_idling(&q->parms))
@@ -129,7 +130,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 
 	skb = qdisc_dequeue(child);
 	if (skb)
@@ -143,7 +144,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
 static unsigned int red_drop(struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
+	struct Qdisc *child = q->class.qdisc;
 	unsigned int len;
 
 	len = qdisc_drop(child);
@@ -164,7 +165,7 @@ static void red_reset(struct Qdisc* sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
+	qdisc_reset(q->class.qdisc);
 	sch->q.qlen = 0;
 	red_restart(&q->parms);
 }
@@ -172,7 +173,7 @@ static void red_reset(struct Qdisc* sch)
 static void red_destroy(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
-	qdisc_destroy(q->qdisc);
+	qdisc_destroy(q->class.qdisc);
 }
 
 static int red_change(struct Qdisc *sch, struct rtattr *opt)
@@ -203,8 +204,9 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-		qdisc_destroy(xchg(&q->qdisc, child));
+		qdisc_tree_decrease_qlen(q->class.qdisc,
+					 q->class.qdisc->q.qlen);
+		qdisc_destroy(xchg(&q->class.qdisc, child));
 	}
 
 	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
@@ -222,7 +224,7 @@ static int red_init(struct Qdisc* sch, struct rtattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
-	q->qdisc = &noop_qdisc;
+	q->class.qdisc = &noop_qdisc;
 	return red_change(sch, opt);
 }
 
@@ -261,94 +263,10 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
 
-static int red_dump_class(struct Qdisc *sch, unsigned long cl,
-			  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-
-	if (cl != 1)
-		return -ENOENT;
-	tcm->tcm_handle |= TC_H_MIN(1);
-	tcm->tcm_info = q->qdisc->handle;
-	return 0;
-}
-
-static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-
-	if (new == NULL)
-		new = &noop_qdisc;
-
-	sch_tree_lock(sch);
-	*old = xchg(&q->qdisc, new);
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-	return 0;
-}
-
-static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-	return q->qdisc;
-}
-
-static unsigned long red_get(struct Qdisc *sch, u32 classid)
-{
-	return 1;
-}
-
-static void red_put(struct Qdisc *sch, unsigned long arg)
-{
-	return;
-}
-
-static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct rtattr **tca, unsigned long *arg)
-{
-	return -ENOSYS;
-}
-
-static int red_delete(struct Qdisc *sch, unsigned long cl)
-{
-	return -ENOSYS;
-}
-
-static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	if (!walker->stop) {
-		if (walker->count >= walker->skip)
-			if (walker->fn(sch, 1, walker) < 0) {
-				walker->stop = 1;
-				return;
-			}
-		walker->count++;
-	}
-}
-
-static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
-	return NULL;
-}
-
-static const struct Qdisc_class_ops red_class_ops = {
-	.graft		=	red_graft,
-	.leaf		=	red_leaf,
-	.get		=	red_get,
-	.put		=	red_put,
-	.change		=	red_change_class,
-	.delete		=	red_delete,
-	.walk		=	red_walk,
-	.tcf_chain	=	red_find_tcf,
-	.dump		=	red_dump_class,
-};
-
 static struct Qdisc_ops red_qdisc_ops __read_mostly = {
 	.id		=	"red",
 	.priv_size	=	sizeof(struct red_sched_data),
-	.cl_ops		=	&red_class_ops,
+	.cl_ops		=	&pseudo_classful_ops,
 	.enqueue	=	red_enqueue,
 	.dequeue	=	red_dequeue,
 	.requeue	=	red_requeue,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 5fd4dff..6590ce3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -99,6 +99,8 @@
 
 struct tbf_sched_data
 {
+	struct pc_sched_data	class;
+
 /* Parameters */
 	u32		limit;		/* Maximal length of backlog: bytes */
 	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -111,7 +113,6 @@ struct tbf_sched_data
 	long	tokens;			/* Current number of B tokens */
 	long	ptokens;		/* Current number of P tokens */
 	psched_time_t	t_c;		/* Time check-point */
-	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
 	struct qdisc_watchdog watchdog;	/* Watchdog timer */
 };
 
@@ -133,7 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return NET_XMIT_DROP;
 	}
 
-	ret = qdisc_enqueue(skb, q->qdisc);
+	ret = qdisc_enqueue(skb, q->class.qdisc);
 	if (ret != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return ret;
@@ -150,7 +151,7 @@ static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	ret = qdisc_requeue(skb, q->qdisc);
+	ret = qdisc_requeue(skb, q->class.qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
@@ -164,7 +165,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
 
-	len = qdisc_drop(q->qdisc);
+	len = qdisc_drop(q->class.qdisc);
 	if (len > 0) {
 		sch->q.qlen--;
 		sch->qstats.drops++;
@@ -177,7 +178,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	skb = qdisc_dequeue(q->qdisc);
+	skb = qdisc_dequeue(q->class.qdisc);
 
 	if (skb) {
 		psched_time_t now;
@@ -222,9 +223,9 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 		   (cf. CSZ, HPFQ, HFSC)
 		 */
 
-		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
+		if (q->class.qdisc->ops->requeue(skb, q->class.qdisc) != NET_XMIT_SUCCESS) {
 			/* When requeue fails skb is dropped */
-			qdisc_tree_decrease_qlen(q->qdisc, 1);
+			qdisc_tree_decrease_qlen(q->class.qdisc, 1);
 			sch->qstats.drops++;
 		}
 
@@ -237,7 +238,7 @@ static void tbf_reset(struct Qdisc* sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
-	qdisc_reset(q->qdisc);
+	qdisc_reset(q->class.qdisc);
 	sch->q.qlen = 0;
 	q->t_c = psched_get_time();
 	q->tokens = q->buffer;
@@ -295,8 +296,8 @@ static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
 
 	sch_tree_lock(sch);
 	if (child) {
-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
-		qdisc_destroy(xchg(&q->qdisc, child));
+		qdisc_tree_decrease_qlen(q->class.qdisc, q->class.qdisc->q.qlen);
+		qdisc_destroy(xchg(&q->class.qdisc, child));
 	}
 	q->limit = qopt->limit;
 	q->mtu = qopt->mtu;
@@ -325,7 +326,7 @@ static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
 
 	q->t_c = psched_get_time();
 	qdisc_watchdog_init(&q->watchdog, sch);
-	q->qdisc = &noop_qdisc;
+	q->class.qdisc = &noop_qdisc;
 
 	return tbf_change(sch, opt);
 }
@@ -341,7 +342,7 @@ static void tbf_destroy(struct Qdisc *sch)
 	if (q->R_tab)
 		qdisc_put_rtab(q->R_tab);
 
-	qdisc_destroy(q->qdisc);
+	qdisc_destroy(q->class.qdisc);
 }
 
 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -372,96 +373,8 @@ rtattr_failure:
 	return -1;
 }
 
-static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
-			  struct sk_buff *skb, struct tcmsg *tcm)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-
-	if (cl != 1) 	/* only one class */
-		return -ENOENT;
-
-	tcm->tcm_handle |= TC_H_MIN(1);
-	tcm->tcm_info = q->qdisc->handle;
-
-	return 0;
-}
-
-static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-
-	if (new == NULL)
-		new = &noop_qdisc;
-
-	sch_tree_lock(sch);
-	*old = xchg(&q->qdisc, new);
-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
-	qdisc_reset(*old);
-	sch_tree_unlock(sch);
-
-	return 0;
-}
-
-static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-	return q->qdisc;
-}
-
-static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
-{
-	return 1;
-}
-
-static void tbf_put(struct Qdisc *sch, unsigned long arg)
-{
-}
-
-static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct rtattr **tca, unsigned long *arg)
-{
-	return -ENOSYS;
-}
-
-static int tbf_delete(struct Qdisc *sch, unsigned long arg)
-{
-	return -ENOSYS;
-}
-
-static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
-{
-	if (!walker->stop) {
-		if (walker->count >= walker->skip)
-			if (walker->fn(sch, 1, walker) < 0) {
-				walker->stop = 1;
-				return;
-			}
-		walker->count++;
-	}
-}
-
-static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
-	return NULL;
-}
-
-static const struct Qdisc_class_ops tbf_class_ops =
-{
-	.graft		=	tbf_graft,
-	.leaf		=	tbf_leaf,
-	.get		=	tbf_get,
-	.put		=	tbf_put,
-	.change		=	tbf_change_class,
-	.delete		=	tbf_delete,
-	.walk		=	tbf_walk,
-	.tcf_chain	=	tbf_find_tcf,
-	.dump		=	tbf_dump_class,
-};
-
 static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
-	.next		=	NULL,
-	.cl_ops		=	&tbf_class_ops,
+	.cl_ops		=	&pseudo_classful_ops,
 	.id		=	"tbf",
 	.priv_size	=	sizeof(struct tbf_sched_data),
 	.enqueue	=	tbf_enqueue,
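
For reference, a condensed sketch (not taken from the posted patches themselves) of the shape a qdisc ends up with after this conversion; the example_* names are made up, while pc_sched_data, pseudo_classful_ops and the qdisc_enqueue()/qdisc_dequeue() child helpers are the ones introduced by this series:

/*
 * Minimal illustrative sketch, assuming the helpers from the patches above.
 * The pc_sched_data must be the first member of the private data, since
 * pseudo_classful_ops reaches it directly through qdisc_priv().
 */
#include <linux/skbuff.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

struct example_sched_data {
	struct pc_sched_data	class;	/* must come first */
	u32			limit;	/* qdisc-specific parameters follow */
};

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	int ret;

	/* the child qdisc is reached through the embedded class data */
	ret = qdisc_enqueue(skb, q->class.qdisc);
	if (ret == NET_XMIT_SUCCESS)
		sch->q.qlen++;
	else
		sch->qstats.drops++;
	return ret;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = qdisc_dequeue(q->class.qdisc);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		=	"example",
	.priv_size	=	sizeof(struct example_sched_data),
	.cl_ops		=	&pseudo_classful_ops,	/* shared class ops */
	.enqueue	=	example_enqueue,
	.dequeue	=	example_dequeue,
	/* init/reset/destroy etc. omitted for brevity */
};

On top of this, the qdisc's Kconfig entry selects NET_SCH_PC so the shared module is built, as the corrected hunk above does for NET_SCH_RED, NET_SCH_TBF and NET_SCH_NETEM.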

^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2008-01-20 18:32 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
2008-01-20 18:28 [RFC NET_SCHED 00/05]: Pseudo-classful qdisc consolidation Patrick McHardy
2008-01-20 18:28 ` [RFC NET_SCHED 01/05]: Consolidate default fifo setup Patrick McHardy
2008-01-20 18:28 ` [RFC NET_SCHED 02/05]: Rename qdisc helpers for built-in queue Patrick McHardy
2008-01-20 18:28 ` [RFC NET_SCHED 03/05]: Introduce child qdisc helpers Patrick McHardy
2008-01-20 18:28 ` [RFC NET_SCHED 04/05]: Use " Patrick McHardy
2008-01-20 18:28 ` [RFC NET_SCHED 05/05]: Consolidate class ops for pseudo classful qdisc Patrick McHardy
2008-01-20 18:32   ` Patrick McHardy
