netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* pkt_sched: add DRR scheduler
@ 2008-11-19 14:26 Patrick McHardy
  2008-11-20 11:35 ` Jarek Poplawski
  0 siblings, 1 reply; 20+ messages in thread
From: Patrick McHardy @ 2008-11-19 14:26 UTC (permalink / raw)
  To: David S. Miller; +Cc: Linux Netdev List

[-- Attachment #1: Type: text/plain, Size: 0 bytes --]



[-- Attachment #2: 01.diff --]
[-- Type: text/x-patch, Size: 13950 bytes --]

commit 9a2114f922015699b3b8402941f9e444f4a84369
Author: Patrick McHardy <kaber@trash.net>
Date:   Wed Nov 19 15:25:03 2008 +0100

    pkt_sched: add DRR scheduler
    
    Add classful DRR scheduler as a more flexible replacement for SFQ.
    
    The main difference to the algorithm described in "Efficient Fair Queueing
    using Deficit Round Robin" is that this implementation doesn't drop packets
    from the longest queue on overrun because its classful and limits are
    handled by each individual child qdisc.
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>

diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 5d921fa..e3f133a 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -500,4 +500,20 @@ struct tc_netem_corrupt
 
 #define NETEM_DIST_SCALE	8192
 
+/* DRR */
+
+enum
+{
+	TCA_DRR_UNSPEC,
+	TCA_DRR_QUANTUM,
+	__TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats
+{
+	u32	deficit;
+};
+
 #endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 36543b6..ba31fe3 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -194,6 +194,17 @@ config NET_SCH_NETEM
 
 	  If unsure, say N.
 
+config NET_SCH_DRR
+	tristate "Deficit round robing scheduler (DRR)"
+	help
+	  Say Y here if you want to use the Deficit round robin (DRR) packet
+	  scheduling algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sch_drr.
+
+	  If unsure, say N.
+
 config NET_SCH_INGRESS
 	tristate "Ingress Qdisc"
 	depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 70b35f8..54d950c 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
 obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
+obj-$(CONFIG_NET_SCH_DRR)	+= sch_drr.o
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
 obj-$(CONFIG_NET_CLS_FW)	+= cls_fw.o
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
new file mode 100644
index 0000000..dd075ef
--- /dev/null
+++ b/net/sched/sch_drr.c
@@ -0,0 +1,505 @@
+/*
+ * net/sched/sch_drr.c         Deficit Round Robin scheduler
+ *
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+
+struct drr_class {
+	struct Qdisc_class_common	common;
+	unsigned int			refcnt;
+	unsigned int			filter_cnt;
+
+	struct gnet_stats_basic		bstats;
+	struct gnet_stats_queue		qstats;
+	struct gnet_stats_rate_est	rate_est;
+	struct list_head		alist;
+	struct Qdisc *			qdisc;
+
+	u32				quantum;
+	u32				deficit;
+};
+
+struct drr_sched {
+	struct list_head		active;
+	struct tcf_proto *		filter_list;
+	struct Qdisc_class_hash		clhash;
+};
+
+static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct Qdisc_class_common *clc;
+
+	clc = qdisc_class_find(&q->clhash, classid);
+	if (clc == NULL)
+		return NULL;
+	return container_of(clc, struct drr_class, common);
+}
+
+static void drr_purge_queue(struct drr_class *cl)
+{
+	unsigned int len = cl->qdisc->q.qlen;
+
+	qdisc_reset(cl->qdisc);
+	qdisc_tree_decrease_qlen(cl->qdisc, len);
+}
+
+static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
+	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
+};
+
+static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			    struct nlattr **tca, unsigned long *arg)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl = (struct drr_class *)*arg;
+	struct nlattr *tb[TCA_DRR_MAX + 1];
+	u32 quantum;
+	int err;
+
+	err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_DRR_QUANTUM]) {
+		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
+		if (quantum == 0)
+			return -EINVAL;
+	} else
+		quantum = psched_mtu(qdisc_dev(sch));
+
+	if (cl != NULL) {
+		sch_tree_lock(sch);
+		if (tb[TCA_DRR_QUANTUM])
+			cl->quantum = quantum;
+		sch_tree_unlock(sch);
+
+		if (tca[TCA_RATE])
+			gen_replace_estimator(&cl->bstats, &cl->rate_est,
+					      qdisc_root_sleeping_lock(sch),
+					      tca[TCA_RATE]);
+		return 0;
+	}
+
+	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
+	if (cl == NULL)
+		return -ENOBUFS;
+
+	cl->refcnt	   = 1;
+	cl->common.classid = classid;
+	cl->quantum	   = quantum;
+	cl->qdisc	   = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					       &pfifo_qdisc_ops, classid);
+	if (cl->qdisc == NULL)
+		cl->qdisc = &noop_qdisc;
+
+	if (tca[TCA_RATE])
+		gen_replace_estimator(&cl->bstats, &cl->rate_est,
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
+
+	sch_tree_lock(sch);
+	qdisc_class_hash_insert(&q->clhash, &cl->common);
+	sch_tree_unlock(sch);
+
+	qdisc_class_hash_grow(sch, &q->clhash);
+
+	*arg = (unsigned long)cl;
+	return 0;
+}
+
+static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
+{
+	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	qdisc_destroy(cl->qdisc);
+	kfree(cl);
+}
+
+static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (cl->filter_cnt > 0)
+		return -EBUSY;
+
+	sch_tree_lock(sch);
+
+	drr_purge_queue(cl);
+	qdisc_class_hash_remove(&q->clhash, &cl->common);
+
+	if (--cl->refcnt == 0)
+		drr_destroy_class(sch, cl);
+
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
+{
+	struct drr_class *cl = drr_find_class(sch, classid);
+
+	if (cl != NULL)
+		cl->refcnt++;
+
+	return (unsigned long)cl;
+}
+
+static void drr_put_class(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (--cl->refcnt == 0)
+		drr_destroy_class(sch, cl);
+}
+
+static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+
+	return &q->filter_list;
+}
+
+static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
+				  u32 classid)
+{
+	struct drr_class *cl = drr_find_class(sch, classid);
+
+	if (cl != NULL)
+		cl->filter_cnt++;
+
+	return (unsigned long)cl;
+}
+
+static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	cl->filter_cnt--;
+}
+
+static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
+			   struct Qdisc *new, struct Qdisc **old)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (new == NULL) {
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					&pfifo_qdisc_ops, cl->common.classid);
+		if (new == NULL)
+			new = &noop_qdisc;
+	}
+
+	sch_tree_lock(sch);
+	drr_purge_queue(cl);
+	*old = xchg(&cl->qdisc, new);
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	return cl->qdisc;
+}
+
+static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (cl->qdisc->q.qlen == 0)
+		list_del(&cl->alist);
+}
+
+static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+	struct nlattr *nest;
+
+	tcm->tcm_parent	= TC_H_ROOT;
+	tcm->tcm_handle	= cl->common.classid;
+	tcm->tcm_info	= cl->qdisc->handle;
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (nest == NULL)
+		goto nla_put_failure;
+	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
+	return nla_nest_end(skb, nest);
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+				struct gnet_dump *d)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+	struct tc_drr_stats xstats;
+	
+	memset(&xstats, 0, sizeof(xstats));
+	if (cl->qdisc->q.qlen)
+		xstats.deficit = cl->deficit;
+
+	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
+		return -1;
+
+	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct hlist_node *n;
+	unsigned int i;
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+			if (arg->count < arg->skip) {
+				arg->count++;
+				continue;
+			}
+			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
+				arg->stop = 1;
+				return;
+			}
+			arg->count++;
+		}
+	}
+}
+
+static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
+				      int *qerr)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct tcf_result res;
+	int result;
+
+	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
+		cl = drr_find_class(sch, skb->priority);
+		if (cl != NULL)
+			return cl;
+	}
+
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_QUEUED:
+		case TC_ACT_STOLEN:
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+		case TC_ACT_SHOT:
+			return NULL;
+		}
+#endif
+		cl = (struct drr_class *)res.class;
+		if (cl == NULL)
+			cl = drr_find_class(sch, res.classid);
+		return cl;
+	}
+	return NULL;
+}
+
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	unsigned int len;
+	int err;
+
+	cl = drr_classify(skb, sch, &err);
+	if (cl == NULL) {
+		if (err & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return err;
+	}
+
+	len = qdisc_pkt_len(skb);
+	err = qdisc_enqueue(skb, cl->qdisc);
+	if (unlikely(err != NET_XMIT_SUCCESS)) {
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
+		return err;
+	}
+
+	if (cl->qdisc->q.qlen == 1) {
+		list_add_tail(&cl->alist, &q->active);
+		cl->deficit = cl->quantum;
+	}
+
+	cl->bstats.packets++;
+	cl->bstats.bytes += len;
+	sch->bstats.packets++;
+	sch->bstats.bytes += len;
+
+	sch->q.qlen++;
+	return err;
+}
+
+static struct sk_buff *drr_dequeue(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct sk_buff *skb;
+	unsigned int len;
+
+	while (!list_empty(&q->active)) {
+		cl = list_first_entry(&q->active, struct drr_class, alist);
+		skb = cl->qdisc->ops->peek(cl->qdisc);
+		if (skb == NULL)
+			goto skip;
+
+		len = qdisc_pkt_len(skb);
+		if (len <= cl->deficit) {
+			cl->deficit -= len;
+			skb = qdisc_dequeue_peeked(cl->qdisc);
+			if (cl->qdisc->q.qlen == 0)
+				list_del(&cl->alist);
+			sch->q.qlen--;
+			return skb;
+		}
+
+		cl->deficit += cl->quantum;
+skip:
+		list_move_tail(&cl->alist, &q->active);
+	}
+	return NULL;
+}
+
+static unsigned int drr_drop(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	unsigned int len;
+
+	list_for_each_entry(cl, &q->active, alist) {
+		if (cl->qdisc->ops->drop) {
+			len = cl->qdisc->ops->drop(cl->qdisc);
+			if (len > 0) {
+				if (cl->qdisc->q.qlen == 0)
+					list_del(&cl->alist);
+				return len;
+			}
+		}
+	}
+	return 0;
+}
+
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	int err;
+
+	err = qdisc_class_hash_init(&q->clhash);
+	if (err < 0)
+		return err;
+	INIT_LIST_HEAD(&q->active);
+	return 0;
+}
+
+static void drr_reset_qdisc(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct hlist_node *n;
+	unsigned int i;
+
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+			if (cl->qdisc->q.qlen)
+				list_del(&cl->alist);
+			qdisc_reset(cl->qdisc);
+		}
+	}
+	sch->q.qlen = 0;
+}
+
+static void drr_destroy_qdisc(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct hlist_node *n, *next;
+	unsigned int i;
+
+	tcf_destroy_chain(&q->filter_list);
+
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+					  common.hnode)
+			drr_destroy_class(sch, cl);
+	}
+	qdisc_class_hash_destroy(&q->clhash);
+}
+
+static const struct Qdisc_class_ops drr_class_ops = {
+	.change		= drr_change_class,
+	.delete		= drr_delete_class,
+	.get		= drr_get_class,
+	.put		= drr_put_class,
+	.tcf_chain	= drr_tcf_chain,
+	.bind_tcf	= drr_bind_tcf,
+	.unbind_tcf	= drr_unbind_tcf,
+	.graft		= drr_graft_class,
+	.leaf		= drr_class_leaf,
+	.qlen_notify	= drr_qlen_notify,
+	.dump		= drr_dump_class,
+	.dump_stats	= drr_dump_class_stats,
+	.walk		= drr_walk,
+};
+
+static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
+	.cl_ops		= &drr_class_ops,
+	.id		= "drr",
+	.priv_size	= sizeof(struct drr_sched),
+	.enqueue	= drr_enqueue,
+	.dequeue	= drr_dequeue,
+	.peek		= qdisc_peek_dequeued,
+	.drop		= drr_drop,
+	.init		= drr_init_qdisc,
+	.reset		= drr_reset_qdisc,
+	.destroy	= drr_destroy_qdisc,
+	.owner		= THIS_MODULE,
+};
+
+static int __init drr_init(void)
+{
+	return register_qdisc(&drr_qdisc_ops);
+}
+
+static void __exit drr_exit(void)
+{
+	unregister_qdisc(&drr_qdisc_ops);
+}
+
+module_init(drr_init);
+module_exit(drr_exit);
+MODULE_LICENSE("GPL");

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: pkt_sched: add DRR scheduler
  2008-11-19 14:26 pkt_sched: add DRR scheduler Patrick McHardy
@ 2008-11-20 11:35 ` Jarek Poplawski
  2008-11-20 11:42   ` Patrick McHardy
  0 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-20 11:35 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

On 19-11-2008 15:26, Patrick McHardy wrote:

...
> diff --git a/net/sched/Kconfig b/net/sched/Kconfig
> index 36543b6..ba31fe3 100644
> --- a/net/sched/Kconfig
> +++ b/net/sched/Kconfig
> @@ -194,6 +194,17 @@ config NET_SCH_NETEM
>  
>  	  If unsure, say N.
>  
> +config NET_SCH_DRR
> +	tristate "Deficit round robing scheduler (DRR)"

 -	tristate "Deficit round robing scheduler (DRR)"
 +	tristate "Deficit Round Robin scheduler (DRR)"

> +	help
> +	  Say Y here if you want to use the Deficit round robin (DRR) packet

 -	  Say Y here if you want to use the Deficit round robin (DRR) packet
 +	  Say Y here if you want to use the Deficit Round Robin (DRR) packet

> +	  scheduling algorithm.

Looks OK to me except checkpatch and documentation/example (any... even
at the beginning of the program).

Jarek P.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: pkt_sched: add DRR scheduler
  2008-11-20 11:35 ` Jarek Poplawski
@ 2008-11-20 11:42   ` Patrick McHardy
  2008-11-20 11:51     ` Jarek Poplawski
                       ` (4 more replies)
  0 siblings, 5 replies; 20+ messages in thread
From: Patrick McHardy @ 2008-11-20 11:42 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Linux Netdev List

[-- Attachment #1: Type: text/plain, Size: 513 bytes --]

Jarek Poplawski wrote:
> On 19-11-2008 15:26, Patrick McHardy wrote:
>   
>> +config NET_SCH_DRR
>> +	tristate "Deficit round robing scheduler (DRR)"
>>     
>
>  -	tristate "Deficit round robing scheduler (DRR)"
>  +	tristate "Deficit Round Robin scheduler (DRR)"
>
>   
Thanks, I've fixed the typos.
> Looks OK to me except checkpatch and documentation/example (any... even
> at the beginning of the program).
And the checkpatch errors.

Documentation for this in the kernel tree seems pretty useless though.



[-- Attachment #2: 01.diff --]
[-- Type: text/x-diff, Size: 13946 bytes --]

commit c6e712b1cf9446096159414cb9d7f8c58effe92f
Author: Patrick McHardy <kaber@trash.net>
Date:   Thu Nov 20 12:39:18 2008 +0100

    pkt_sched: add DRR scheduler
    
    Add classful DRR scheduler as a more flexible replacement for SFQ.
    
    The main difference to the algorithm described in "Efficient Fair Queueing
    using Deficit Round Robin" is that this implementation doesn't drop packets
    from the longest queue on overrun because its classful and limits are
    handled by each individual child qdisc.
    
    Signed-off-by: Patrick McHardy <kaber@trash.net>

diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 5d921fa..e3f133a 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -500,4 +500,20 @@ struct tc_netem_corrupt
 
 #define NETEM_DIST_SCALE	8192
 
+/* DRR */
+
+enum
+{
+	TCA_DRR_UNSPEC,
+	TCA_DRR_QUANTUM,
+	__TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats
+{
+	u32	deficit;
+};
+
 #endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 36543b6..4f7ef0d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -194,6 +194,17 @@ config NET_SCH_NETEM
 
 	  If unsure, say N.
 
+config NET_SCH_DRR
+	tristate "Deficit Round Robin scheduler (DRR)"
+	help
+	  Say Y here if you want to use the Deficit Round Robin (DRR) packet
+	  scheduling algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sch_drr.
+
+	  If unsure, say N.
+
 config NET_SCH_INGRESS
 	tristate "Ingress Qdisc"
 	depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 70b35f8..54d950c 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
 obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
+obj-$(CONFIG_NET_SCH_DRR)	+= sch_drr.o
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
 obj-$(CONFIG_NET_CLS_FW)	+= cls_fw.o
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
new file mode 100644
index 0000000..e71a5de
--- /dev/null
+++ b/net/sched/sch_drr.c
@@ -0,0 +1,505 @@
+/*
+ * net/sched/sch_drr.c         Deficit Round Robin scheduler
+ *
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+
+struct drr_class {
+	struct Qdisc_class_common	common;
+	unsigned int			refcnt;
+	unsigned int			filter_cnt;
+
+	struct gnet_stats_basic		bstats;
+	struct gnet_stats_queue		qstats;
+	struct gnet_stats_rate_est	rate_est;
+	struct list_head		alist;
+	struct Qdisc			*qdisc;
+
+	u32				quantum;
+	u32				deficit;
+};
+
+struct drr_sched {
+	struct list_head		active;
+	struct tcf_proto		*filter_list;
+	struct Qdisc_class_hash		clhash;
+};
+
+static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct Qdisc_class_common *clc;
+
+	clc = qdisc_class_find(&q->clhash, classid);
+	if (clc == NULL)
+		return NULL;
+	return container_of(clc, struct drr_class, common);
+}
+
+static void drr_purge_queue(struct drr_class *cl)
+{
+	unsigned int len = cl->qdisc->q.qlen;
+
+	qdisc_reset(cl->qdisc);
+	qdisc_tree_decrease_qlen(cl->qdisc, len);
+}
+
+static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
+	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
+};
+
+static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			    struct nlattr **tca, unsigned long *arg)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl = (struct drr_class *)*arg;
+	struct nlattr *tb[TCA_DRR_MAX + 1];
+	u32 quantum;
+	int err;
+
+	err = nla_parse_nested(tb, TCA_DRR_MAX, tca[TCA_OPTIONS], drr_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_DRR_QUANTUM]) {
+		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
+		if (quantum == 0)
+			return -EINVAL;
+	} else
+		quantum = psched_mtu(qdisc_dev(sch));
+
+	if (cl != NULL) {
+		sch_tree_lock(sch);
+		if (tb[TCA_DRR_QUANTUM])
+			cl->quantum = quantum;
+		sch_tree_unlock(sch);
+
+		if (tca[TCA_RATE])
+			gen_replace_estimator(&cl->bstats, &cl->rate_est,
+					      qdisc_root_sleeping_lock(sch),
+					      tca[TCA_RATE]);
+		return 0;
+	}
+
+	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
+	if (cl == NULL)
+		return -ENOBUFS;
+
+	cl->refcnt	   = 1;
+	cl->common.classid = classid;
+	cl->quantum	   = quantum;
+	cl->qdisc	   = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					       &pfifo_qdisc_ops, classid);
+	if (cl->qdisc == NULL)
+		cl->qdisc = &noop_qdisc;
+
+	if (tca[TCA_RATE])
+		gen_replace_estimator(&cl->bstats, &cl->rate_est,
+				      qdisc_root_sleeping_lock(sch),
+				      tca[TCA_RATE]);
+
+	sch_tree_lock(sch);
+	qdisc_class_hash_insert(&q->clhash, &cl->common);
+	sch_tree_unlock(sch);
+
+	qdisc_class_hash_grow(sch, &q->clhash);
+
+	*arg = (unsigned long)cl;
+	return 0;
+}
+
+static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
+{
+	gen_kill_estimator(&cl->bstats, &cl->rate_est);
+	qdisc_destroy(cl->qdisc);
+	kfree(cl);
+}
+
+static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (cl->filter_cnt > 0)
+		return -EBUSY;
+
+	sch_tree_lock(sch);
+
+	drr_purge_queue(cl);
+	qdisc_class_hash_remove(&q->clhash, &cl->common);
+
+	if (--cl->refcnt == 0)
+		drr_destroy_class(sch, cl);
+
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
+{
+	struct drr_class *cl = drr_find_class(sch, classid);
+
+	if (cl != NULL)
+		cl->refcnt++;
+
+	return (unsigned long)cl;
+}
+
+static void drr_put_class(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (--cl->refcnt == 0)
+		drr_destroy_class(sch, cl);
+}
+
+static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+
+	return &q->filter_list;
+}
+
+static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
+				  u32 classid)
+{
+	struct drr_class *cl = drr_find_class(sch, classid);
+
+	if (cl != NULL)
+		cl->filter_cnt++;
+
+	return (unsigned long)cl;
+}
+
+static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	cl->filter_cnt--;
+}
+
+static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
+			   struct Qdisc *new, struct Qdisc **old)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (new == NULL) {
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					&pfifo_qdisc_ops, cl->common.classid);
+		if (new == NULL)
+			new = &noop_qdisc;
+	}
+
+	sch_tree_lock(sch);
+	drr_purge_queue(cl);
+	*old = xchg(&cl->qdisc, new);
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	return cl->qdisc;
+}
+
+static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+
+	if (cl->qdisc->q.qlen == 0)
+		list_del(&cl->alist);
+}
+
+static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+	struct nlattr *nest;
+
+	tcm->tcm_parent	= TC_H_ROOT;
+	tcm->tcm_handle	= cl->common.classid;
+	tcm->tcm_info	= cl->qdisc->handle;
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (nest == NULL)
+		goto nla_put_failure;
+	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
+	return nla_nest_end(skb, nest);
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+				struct gnet_dump *d)
+{
+	struct drr_class *cl = (struct drr_class *)arg;
+	struct tc_drr_stats xstats;
+
+	memset(&xstats, 0, sizeof(xstats));
+	if (cl->qdisc->q.qlen)
+		xstats.deficit = cl->deficit;
+
+	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
+		return -1;
+
+	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+}
+
+static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct hlist_node *n;
+	unsigned int i;
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+			if (arg->count < arg->skip) {
+				arg->count++;
+				continue;
+			}
+			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
+				arg->stop = 1;
+				return;
+			}
+			arg->count++;
+		}
+	}
+}
+
+static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
+				      int *qerr)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct tcf_result res;
+	int result;
+
+	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
+		cl = drr_find_class(sch, skb->priority);
+		if (cl != NULL)
+			return cl;
+	}
+
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_QUEUED:
+		case TC_ACT_STOLEN:
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+		case TC_ACT_SHOT:
+			return NULL;
+		}
+#endif
+		cl = (struct drr_class *)res.class;
+		if (cl == NULL)
+			cl = drr_find_class(sch, res.classid);
+		return cl;
+	}
+	return NULL;
+}
+
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	unsigned int len;
+	int err;
+
+	cl = drr_classify(skb, sch, &err);
+	if (cl == NULL) {
+		if (err & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return err;
+	}
+
+	len = qdisc_pkt_len(skb);
+	err = qdisc_enqueue(skb, cl->qdisc);
+	if (unlikely(err != NET_XMIT_SUCCESS)) {
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
+		return err;
+	}
+
+	if (cl->qdisc->q.qlen == 1) {
+		list_add_tail(&cl->alist, &q->active);
+		cl->deficit = cl->quantum;
+	}
+
+	cl->bstats.packets++;
+	cl->bstats.bytes += len;
+	sch->bstats.packets++;
+	sch->bstats.bytes += len;
+
+	sch->q.qlen++;
+	return err;
+}
+
+static struct sk_buff *drr_dequeue(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct sk_buff *skb;
+	unsigned int len;
+
+	while (!list_empty(&q->active)) {
+		cl = list_first_entry(&q->active, struct drr_class, alist);
+		skb = cl->qdisc->ops->peek(cl->qdisc);
+		if (skb == NULL)
+			goto skip;
+
+		len = qdisc_pkt_len(skb);
+		if (len <= cl->deficit) {
+			cl->deficit -= len;
+			skb = qdisc_dequeue_peeked(cl->qdisc);
+			if (cl->qdisc->q.qlen == 0)
+				list_del(&cl->alist);
+			sch->q.qlen--;
+			return skb;
+		}
+
+		cl->deficit += cl->quantum;
+skip:
+		list_move_tail(&cl->alist, &q->active);
+	}
+	return NULL;
+}
+
+static unsigned int drr_drop(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	unsigned int len;
+
+	list_for_each_entry(cl, &q->active, alist) {
+		if (cl->qdisc->ops->drop) {
+			len = cl->qdisc->ops->drop(cl->qdisc);
+			if (len > 0) {
+				if (cl->qdisc->q.qlen == 0)
+					list_del(&cl->alist);
+				return len;
+			}
+		}
+	}
+	return 0;
+}
+
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	int err;
+
+	err = qdisc_class_hash_init(&q->clhash);
+	if (err < 0)
+		return err;
+	INIT_LIST_HEAD(&q->active);
+	return 0;
+}
+
+static void drr_reset_qdisc(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct hlist_node *n;
+	unsigned int i;
+
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
+			if (cl->qdisc->q.qlen)
+				list_del(&cl->alist);
+			qdisc_reset(cl->qdisc);
+		}
+	}
+	sch->q.qlen = 0;
+}
+
+static void drr_destroy_qdisc(struct Qdisc *sch)
+{
+	struct drr_sched *q = qdisc_priv(sch);
+	struct drr_class *cl;
+	struct hlist_node *n, *next;
+	unsigned int i;
+
+	tcf_destroy_chain(&q->filter_list);
+
+	for (i = 0; i < q->clhash.hashsize; i++) {
+		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
+					  common.hnode)
+			drr_destroy_class(sch, cl);
+	}
+	qdisc_class_hash_destroy(&q->clhash);
+}
+
+static const struct Qdisc_class_ops drr_class_ops = {
+	.change		= drr_change_class,
+	.delete		= drr_delete_class,
+	.get		= drr_get_class,
+	.put		= drr_put_class,
+	.tcf_chain	= drr_tcf_chain,
+	.bind_tcf	= drr_bind_tcf,
+	.unbind_tcf	= drr_unbind_tcf,
+	.graft		= drr_graft_class,
+	.leaf		= drr_class_leaf,
+	.qlen_notify	= drr_qlen_notify,
+	.dump		= drr_dump_class,
+	.dump_stats	= drr_dump_class_stats,
+	.walk		= drr_walk,
+};
+
+static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
+	.cl_ops		= &drr_class_ops,
+	.id		= "drr",
+	.priv_size	= sizeof(struct drr_sched),
+	.enqueue	= drr_enqueue,
+	.dequeue	= drr_dequeue,
+	.peek		= qdisc_peek_dequeued,
+	.drop		= drr_drop,
+	.init		= drr_init_qdisc,
+	.reset		= drr_reset_qdisc,
+	.destroy	= drr_destroy_qdisc,
+	.owner		= THIS_MODULE,
+};
+
+static int __init drr_init(void)
+{
+	return register_qdisc(&drr_qdisc_ops);
+}
+
+static void __exit drr_exit(void)
+{
+	unregister_qdisc(&drr_qdisc_ops);
+}
+
+module_init(drr_init);
+module_exit(drr_exit);
+MODULE_LICENSE("GPL");

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: pkt_sched: add DRR scheduler
  2008-11-20 11:42   ` Patrick McHardy
@ 2008-11-20 11:51     ` Jarek Poplawski
  2008-11-20 11:58       ` Patrick McHardy
  2008-11-20 12:10     ` David Miller
                       ` (3 subsequent siblings)
  4 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-20 11:51 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

On Thu, Nov 20, 2008 at 12:42:58PM +0100, Patrick McHardy wrote:
...
> Documentation for this in the kernel tree seems pretty useless though.

Right! What about iproute2 tree? (examples/?)

Jarek P.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: pkt_sched: add DRR scheduler
  2008-11-20 11:51     ` Jarek Poplawski
@ 2008-11-20 11:58       ` Patrick McHardy
  2008-11-20 12:06         ` Jarek Poplawski
  0 siblings, 1 reply; 20+ messages in thread
From: Patrick McHardy @ 2008-11-20 11:58 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Linux Netdev List

Jarek Poplawski wrote:
> On Thu, Nov 20, 2008 at 12:42:58PM +0100, Patrick McHardy wrote:
> ...
>   
>> Documentation for this in the kernel tree seems pretty useless though.
>>     
>
> Right! What about iproute2 tree? (examples/?)
>   
Yes, I'll send something when I find some time (manpage probably).

It's trivial to use though, there is only a single optional parameter,
the quantum.


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: pkt_sched: add DRR scheduler
  2008-11-20 11:58       ` Patrick McHardy
@ 2008-11-20 12:06         ` Jarek Poplawski
  0 siblings, 0 replies; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-20 12:06 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

On Thu, Nov 20, 2008 at 12:58:11PM +0100, Patrick McHardy wrote:
...
> It's trivial to use though, there is only a single optional parameter,
> the quantum.

OK, but if you advertise this as a "flexible replacement for SFQ"
somebody could be misled, so I mean at least a simple example of such
a replacement.

Jarek P.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: pkt_sched: add DRR scheduler
  2008-11-20 11:42   ` Patrick McHardy
  2008-11-20 11:51     ` Jarek Poplawski
@ 2008-11-20 12:10     ` David Miller
  2008-11-21 12:19     ` [PATCH] " Jarek Poplawski
                       ` (2 subsequent siblings)
  4 siblings, 0 replies; 20+ messages in thread
From: David Miller @ 2008-11-20 12:10 UTC (permalink / raw)
  To: kaber; +Cc: jarkao2, netdev

From: Patrick McHardy <kaber@trash.net>
Date: Thu, 20 Nov 2008 12:42:58 +0100

> commit c6e712b1cf9446096159414cb9d7f8c58effe92f
> Author: Patrick McHardy <kaber@trash.net>
> Date:   Thu Nov 20 12:39:18 2008 +0100
> 
>     pkt_sched: add DRR scheduler
>     
>     Add classful DRR scheduler as a more flexible replacement for SFQ.
>     
>     The main difference to the algorithm described in "Efficient Fair Queueing
>     using Deficit Round Robin" is that this implementation doesn't drop packets
>     from the longest queue on overrun because its classful and limits are
>     handled by each individual child qdisc.
>     
>     Signed-off-by: Patrick McHardy <kaber@trash.net>

Applied, thanks Patrick.


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH] Re: pkt_sched: add DRR scheduler
  2008-11-20 11:42   ` Patrick McHardy
  2008-11-20 11:51     ` Jarek Poplawski
  2008-11-20 12:10     ` David Miller
@ 2008-11-21 12:19     ` Jarek Poplawski
  2008-11-21 12:36       ` Patrick McHardy
  2008-11-24 10:50     ` [PATCH] pkt_sched: sch_drr: Fix drr_dequeue() loop Jarek Poplawski
  2008-11-24 10:53     ` [PATCH v2] " Jarek Poplawski
  4 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-21 12:19 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

A small oversight:
----------------->
pkt_sched: sch_drr: Fix qlen in drr_drop()

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
---

 net/sched/sch_drr.c |    1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 8d523d9..37e6ab9 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -406,6 +406,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
 		if (cl->qdisc->ops->drop) {
 			len = cl->qdisc->ops->drop(cl->qdisc);
 			if (len > 0) {
+				sch->q.qlen--;
 				if (cl->qdisc->q.qlen == 0)
 					list_del(&cl->alist);
 				return len;

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [PATCH] Re: pkt_sched: add DRR scheduler
  2008-11-21 12:19     ` [PATCH] " Jarek Poplawski
@ 2008-11-21 12:36       ` Patrick McHardy
  2008-11-21 12:37         ` David Miller
  0 siblings, 1 reply; 20+ messages in thread
From: Patrick McHardy @ 2008-11-21 12:36 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Linux Netdev List

Jarek Poplawski wrote:
> A small oversight:
> ----------------->
> pkt_sched: sch_drr: Fix qlen in drr_drop()
>
> Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>

Good catch, thanks.

Acked-by: Patrick McHardy <kaber@trash.net>

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH] Re: pkt_sched: add DRR scheduler
  2008-11-21 12:36       ` Patrick McHardy
@ 2008-11-21 12:37         ` David Miller
  0 siblings, 0 replies; 20+ messages in thread
From: David Miller @ 2008-11-21 12:37 UTC (permalink / raw)
  To: kaber; +Cc: jarkao2, netdev

From: Patrick McHardy <kaber@trash.net>
Date: Fri, 21 Nov 2008 13:36:21 +0100

> Jarek Poplawski wrote:
> > A small oversight:
> > ----------------->
> > pkt_sched: sch_drr: Fix qlen in drr_drop()
> >
> > Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
> 
> Good catch, thanks.
> 
> Acked-by: Patrick McHardy <kaber@trash.net>

Applied, thanks.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-20 11:42   ` Patrick McHardy
                       ` (2 preceding siblings ...)
  2008-11-21 12:19     ` [PATCH] " Jarek Poplawski
@ 2008-11-24 10:50     ` Jarek Poplawski
  2008-11-24 10:53     ` [PATCH v2] " Jarek Poplawski
  4 siblings, 0 replies; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-24 10:50 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

pkt_sched: sch_drr: Fix loop in drr_dequeue

If all child qdiscs of sch_drr are non-work-conserving (e.g. sch_tbf)
drr_dequeue() will busy-loop waiting for skbs instead of leaving the
job for a watchdog. Checking for list_empty() in each loop isn't
necessary either, because this can never be true except the first time.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
---

 net/sched/sch_drr.c |   24 +++++++++++++++++++++---
 1 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 37e6ab9..ab75461 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -369,12 +369,17 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
-	struct drr_class *cl;
+	struct drr_class *cl, *cl_first;
 	struct sk_buff *skb;
 	unsigned int len;
+	bool skb_waiting = false;
 
-	while (!list_empty(&q->active)) {
-		cl = list_first_entry(&q->active, struct drr_class, alist);
+	if (list_empty(&q->active))
+		return NULL;
+
+	cl_first = cl = list_first_entry(&q->active, struct drr_class, alist);
+
+	while (1) {
 		skb = cl->qdisc->ops->peek(cl->qdisc);
 		if (skb == NULL)
 			goto skip;
@@ -390,9 +395,22 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 		}
 
 		cl->deficit += cl->quantum;
+		skb_waiting = true;
 skip:
 		list_move_tail(&cl->alist, &q->active);
+		cl = list_first_entry(&q->active, struct drr_class, alist);
+
+		if (cl == cl_first) {
+			if (skb_waiting) {
+				/* next round of deficit refilling */
+				skb_waiting = false;
+			} else {
+				/* all qdiscs are non-work-conserving! */
+				break;
+			}
+		}
 	}
+
 	return NULL;
 }
 

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-20 11:42   ` Patrick McHardy
                       ` (3 preceding siblings ...)
  2008-11-24 10:50     ` [PATCH] pkt_sched: sch_drr: Fix drr_dequeue() loop Jarek Poplawski
@ 2008-11-24 10:53     ` Jarek Poplawski
  2008-11-24 12:15       ` Patrick McHardy
  4 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-24 10:53 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

(Changelog fixed only)

pkt_sched: sch_drr: Fix drr_dequeue() loop

If all child qdiscs of sch_drr are non-work-conserving (e.g. sch_tbf)
drr_dequeue() will busy-loop waiting for skbs instead of leaving the
job for a watchdog. Checking for list_empty() in each loop isn't
necessary either, because this can never be true except the first time.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
---

 net/sched/sch_drr.c |   24 +++++++++++++++++++++---
 1 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 37e6ab9..ab75461 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -369,12 +369,17 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
-	struct drr_class *cl;
+	struct drr_class *cl, *cl_first;
 	struct sk_buff *skb;
 	unsigned int len;
+	bool skb_waiting = false;
 
-	while (!list_empty(&q->active)) {
-		cl = list_first_entry(&q->active, struct drr_class, alist);
+	if (list_empty(&q->active))
+		return NULL;
+
+	cl_first = cl = list_first_entry(&q->active, struct drr_class, alist);
+
+	while (1) {
 		skb = cl->qdisc->ops->peek(cl->qdisc);
 		if (skb == NULL)
 			goto skip;
@@ -390,9 +395,22 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 		}
 
 		cl->deficit += cl->quantum;
+		skb_waiting = true;
 skip:
 		list_move_tail(&cl->alist, &q->active);
+		cl = list_first_entry(&q->active, struct drr_class, alist);
+
+		if (cl == cl_first) {
+			if (skb_waiting) {
+				/* next round of deficit refilling */
+				skb_waiting = false;
+			} else {
+				/* all qdiscs are non-work-conserving! */
+				break;
+			}
+		}
 	}
+
 	return NULL;
 }
 

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 10:53     ` [PATCH v2] " Jarek Poplawski
@ 2008-11-24 12:15       ` Patrick McHardy
  2008-11-24 12:33         ` Jarek Poplawski
  0 siblings, 1 reply; 20+ messages in thread
From: Patrick McHardy @ 2008-11-24 12:15 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Linux Netdev List

[-- Attachment #1: Type: text/plain, Size: 568 bytes --]

Jarek Poplawski wrote:
> (Changelog fixed only)
> 
> pkt_sched: sch_drr: Fix drr_dequeue() loop
> 
> If all child qdiscs of sch_drr are non-work-conserving (e.g. sch_tbf)
> drr_dequeue() will busy-loop waiting for skbs instead of leaving the
> job for a watchdog. Checking for list_empty() in each loop isn't
> necessary either, because this can never be true except the first time.

Thanks for the report. I don't like to overcomplicate treatment
of this broken configuration though, so this patch simply returns
NULL when the inner qdiscs is non-work-conserving.




[-- Attachment #2: 01.diff --]
[-- Type: text/x-patch, Size: 1491 bytes --]

commit f995e708c778d5c3201d769ffae636f6941654ed
Author: Patrick McHardy <kaber@trash.net>
Date:   Mon Nov 24 13:13:16 2008 +0100

    pkt_sched: sch_drr: fix drr_dequeue loop()
    
    Jarek Poplawski points out:
    
    If all child qdiscs of sch_drr are non-work-conserving (e.g. sch_tbf)
    drr_dequeue() will busy-loop waiting for skbs instead of leaving the
    job for a watchdog. Checking for list_empty() in each loop isn't
    necessary either, because this can never be true except the first time.
    
    Using non-work-conserving qdiscs as children of DRR makes no sense,
    simply bail out in that case.
    
    Reported-by: Jarek Poplawski <jarkao2@gmail.com>
    Signed-off-by: Patrick McHardy <kaber@trash.net>

diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 37e6ab9..e7a7e87 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -373,11 +373,13 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 	struct sk_buff *skb;
 	unsigned int len;
 
-	while (!list_empty(&q->active)) {
+	if (list_empty(&q->active))
+		goto out;
+	while (1) {
 		cl = list_first_entry(&q->active, struct drr_class, alist);
 		skb = cl->qdisc->ops->peek(cl->qdisc);
 		if (skb == NULL)
-			goto skip;
+			goto out;
 
 		len = qdisc_pkt_len(skb);
 		if (len <= cl->deficit) {
@@ -390,9 +392,9 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 		}
 
 		cl->deficit += cl->quantum;
-skip:
 		list_move_tail(&cl->alist, &q->active);
 	}
+out:
 	return NULL;
 }
 

^ permalink raw reply related	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 12:15       ` Patrick McHardy
@ 2008-11-24 12:33         ` Jarek Poplawski
  2008-11-24 12:38           ` Patrick McHardy
  0 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-24 12:33 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

On Mon, Nov 24, 2008 at 01:15:07PM +0100, Patrick McHardy wrote:
> Jarek Poplawski wrote:
>> (Changelog fixed only)
>>
>> pkt_sched: sch_drr: Fix drr_dequeue() loop
>>
>> If all child qdiscs of sch_drr are non-work-conserving (e.g. sch_tbf)
>> drr_dequeue() will busy-loop waiting for skbs instead of leaving the
>> job for a watchdog. Checking for list_empty() in each loop isn't
>> necessary either, because this can never be true except the first time.
>
> Thanks for the report. I don't like to overcomplicate treatment
of this broken configuration though, so this patch simply returns
NULL when the inner qdisc is non-work-conserving.

Hmm... I don't agree with treating this as broken (IIRC Denys
Fedoryshchenko wrote about some case when he prefers TBF over HTB,
and with DRR this could be an interesting alternative), but of course
you are the author and I respect your decision.

Thanks,
Jarek P.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 12:33         ` Jarek Poplawski
@ 2008-11-24 12:38           ` Patrick McHardy
  2008-11-24 12:51             ` Jarek Poplawski
  0 siblings, 1 reply; 20+ messages in thread
From: Patrick McHardy @ 2008-11-24 12:38 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Linux Netdev List

Jarek Poplawski wrote:
> On Mon, Nov 24, 2008 at 01:15:07PM +0100, Patrick McHardy wrote:
>> Jarek Poplawski wrote:
>>> (Changelog fixed only)
>>>
>>> pkt_sched: sch_drr: Fix drr_dequeue() loop
>>>
>>> If all child qdiscs of sch_drr are non-work-conserving (e.g. sch_tbf)
>>> drr_dequeue() will busy-loop waiting for skbs instead of leaving the
>>> job for a watchdog. Checking for list_empty() in each loop isn't
>>> necessary either, because this can never be true except the first time.
>> Thanks for the report. I don't like to overcomplicate treatment
>> of this broken configuration though, so this patch simply returns
>> NULL when the inner qdiscs is non-work-conserving.
> 
> Hmm... I don't agree with treating this as broken (IIRC Denys
> Fedoryshchenko wrote about some case when he prefers TBF over HTB,
> and with DRR this could be an interesting alternative), but of course
> you are the author and I respect your decision.

TBF with an inner DRR is fine. The other way around is broken
in the sense that the behaviour is undefined.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 12:38           ` Patrick McHardy
@ 2008-11-24 12:51             ` Jarek Poplawski
  2008-11-24 13:17               ` Patrick McHardy
  0 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-24 12:51 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

On Mon, Nov 24, 2008 at 01:38:48PM +0100, Patrick McHardy wrote:
...
> TBF with an inner DRR is fine. The other way around is broken
> in the sense that the behaviour is undefined.

IMHO, this other way (e.g. a class with TBF per user), should work too.

BTW, since this "broken" config isn't very apparent, maybe you should
add some warning?

Jarek P. 

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 12:51             ` Jarek Poplawski
@ 2008-11-24 13:17               ` Patrick McHardy
  2008-11-24 13:45                 ` Jarek Poplawski
  0 siblings, 1 reply; 20+ messages in thread
From: Patrick McHardy @ 2008-11-24 13:17 UTC (permalink / raw)
  To: Jarek Poplawski; +Cc: David S. Miller, Linux Netdev List

Jarek Poplawski wrote:
> On Mon, Nov 24, 2008 at 01:38:48PM +0100, Patrick McHardy wrote:
> ...
>> TBF with an inner DRR is fine. The other way around is broken
>> in the sense that the behaviour is undefined.
> 
> IMHO, this other way (e.g. a class with TBF per user), should work too.

The behaviour is undefined, so what does "work too" mean in this context?

The main question is: what should be done with the class when it
throttles?

You suggest moving it to the end of the active list. Should its deficit
be recharged in that case? Possible no because it didn't send packets -
but then again it might have handed out *some* packets (less than the
deficit) before it started throttling. Both ways would introduce
unfairness.

What could be done without harming the algorithm is to treat throttled
classes as inactive until they become unthrottled again, meaning they
would be added to the end of the active list with a full deficit. But
we have no indication for specific classes, unthrottling simply triggers
another dequeue of the root, so the implementation would get quite
complicated, leaving alone the fact that each TBF would potentially
start its own watchdog, causing excessive wakeups.

And I don't see much use for this, what is the advantage over using
HTB or HFSC?

> BTW, since this "broken" config isn't very apparent, maybe you should
> add some warning?

Yes, but I don't want to add this to the ->dequeue() path. It belongs
in the ->init() path and we currently don't have enough information
in there.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 13:17               ` Patrick McHardy
@ 2008-11-24 13:45                 ` Jarek Poplawski
  2008-11-24 23:47                   ` David Miller
  0 siblings, 1 reply; 20+ messages in thread
From: Jarek Poplawski @ 2008-11-24 13:45 UTC (permalink / raw)
  To: Patrick McHardy; +Cc: David S. Miller, Linux Netdev List

On Mon, Nov 24, 2008 at 02:17:32PM +0100, Patrick McHardy wrote:
> Jarek Poplawski wrote:
>> On Mon, Nov 24, 2008 at 01:38:48PM +0100, Patrick McHardy wrote:
>> ...
>>> TBF with an inner DRR is fine. The other way around is broken
>>> in the sense that the behaviour is undefined.
>>
>> IMHO, this other way (e.g. a class with TBF per user), should work too.
>
> The behaviour undefined, so what does "work too" mean in this context?
>
> The main question is: what should be done with the class when it
> throttles?
>
You suggest moving it to the end of the active list. Should its deficit
be recharged in that case? Possibly no, because it didn't send packets -
> but then again it might have handed out *some* packets (less than the
> deficit) before it started throttling. Both ways would introduce
> unfairness.
>
> What could be done without harming the algorithm is to treat throttled
> classes as inactive until they become unthrottled again, meaning they
> would be added to the end of the active list with a full deficit. But
> we have no indication for specific classes, unthrottling simply triggers
> another dequeue of the root, so the implementation would get quite
> complicated, leaving alone the fact that each TBF would potentially
> start its own watchdog, causing excessive wakeups.
>
> And I don't see much use for this, what is the advantage over using
> HTB or HFSC?

Probably no advantage, if you know these things... or for testing. But
I like to give users a choice, at least if it's not obviously wrong.
Of course, if you think this harms the proper configs with too much
overhead, then there is no question we should forget about this.

Jarek P.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 13:45                 ` Jarek Poplawski
@ 2008-11-24 23:47                   ` David Miller
  2008-11-25 11:42                     ` Patrick McHardy
  0 siblings, 1 reply; 20+ messages in thread
From: David Miller @ 2008-11-24 23:47 UTC (permalink / raw)
  To: jarkao2; +Cc: kaber, netdev

From: Jarek Poplawski <jarkao2@gmail.com>
Date: Mon, 24 Nov 2008 13:45:16 +0000

> On Mon, Nov 24, 2008 at 02:17:32PM +0100, Patrick McHardy wrote:
> > Jarek Poplawski wrote:
> >> On Mon, Nov 24, 2008 at 01:38:48PM +0100, Patrick McHardy wrote:
> >> ...
> >>> TBF with an inner DRR is fine. The other way around is broken
> >>> in the sense that the behaviour is undefined.
> >>
> >> IMHO, this other way (e.g. a class with TBF per user), should work too.
> >
> > The behaviour undefined, so what does "work too" mean in this context?
> >
> > The main question is: what should be done with the class when it
> > throttles?
> >
> > You suggest moving it to the end of the active list. Should its deficit
> > be recharged in that case? Possible no because it didn't send packets -
> > but then again it might have handed out *some* packets (less than the
> > deficit) before it started throttling. Both ways would introduce
> > unfairness.
> >
> > What could be done without harming the algorithm is to treat throttled
> > classes as inactive until they become unthrottled again, meaning they
> > would be added to the end of the active list with a full deficit. But
> > we have no indication for specific classes, unthrottling simply triggers
> > another dequeue of the root, so the implementation would get quite
> > complicated, leaving alone the fact that each TBF would potentially
> > start its own watchdog, causing excessive wakeups.
> >
> > And I don't see much use for this, what is the advantage over using
> > HTB or HFSC?
> 
> Probably no advantage, if you now these things... or for testing. But
> I like to give users a choice, at least if it's not obviously wrong.
> Of course, if you think this harms the proper configs with too much
> overhead, then there is no question we should forget about this.

Things seem to have settled, thus I have applied Patrick's version
of the fix.

But I encourage people to add the necessary framework such that
such unwanted configurations can be in fact detected at ->init()
time and thus properly warned about.

Silent packet dropping really upsets users.

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v2] pkt_sched: sch_drr: Fix drr_dequeue() loop
  2008-11-24 23:47                   ` David Miller
@ 2008-11-25 11:42                     ` Patrick McHardy
  0 siblings, 0 replies; 20+ messages in thread
From: Patrick McHardy @ 2008-11-25 11:42 UTC (permalink / raw)
  To: David Miller; +Cc: jarkao2, netdev

David Miller wrote:
> Things seem to have settled, thus I have applied Patrick's version
> of the fix.

Thanks.

> But I encourage people to add the necessary framework such that
> such unwanted configurations can be in fact detected at ->init()
> time and thus properly warned about.
> 
> Silent packet dropping really upsets users.

The packets won't be dropped, the qdisc will simply wait until the
throttled inner qdisc becomes active again.

Refusing incorrect changes in ->init() is not easy because at least
HFSC can switch between work-conserving and non-work-conserving
through ->change(), so the upper qdisc would have to be able to
perform validation before the change is made. This is additionally
complicated by the fact that (in case of HFSC) it can't be determined
by looking only at the class that is currently changed, the use
of a upper-limit curve on any class makes it non-work-conserving.

Perhaps its best to ignore this special case and always treat HFSC
as non-work-conserving, I haven't seen a configuration which uses
a hierarchical qdisc as inner qdisc so far.


^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2008-11-25 11:42 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-11-19 14:26 pkt_sched: add DRR scheduler Patrick McHardy
2008-11-20 11:35 ` Jarek Poplawski
2008-11-20 11:42   ` Patrick McHardy
2008-11-20 11:51     ` Jarek Poplawski
2008-11-20 11:58       ` Patrick McHardy
2008-11-20 12:06         ` Jarek Poplawski
2008-11-20 12:10     ` David Miller
2008-11-21 12:19     ` [PATCH] " Jarek Poplawski
2008-11-21 12:36       ` Patrick McHardy
2008-11-21 12:37         ` David Miller
2008-11-24 10:50     ` [PATCH] pkt_sched: sch_drr: Fix drr_dequeue() loop Jarek Poplawski
2008-11-24 10:53     ` [PATCH v2] " Jarek Poplawski
2008-11-24 12:15       ` Patrick McHardy
2008-11-24 12:33         ` Jarek Poplawski
2008-11-24 12:38           ` Patrick McHardy
2008-11-24 12:51             ` Jarek Poplawski
2008-11-24 13:17               ` Patrick McHardy
2008-11-24 13:45                 ` Jarek Poplawski
2008-11-24 23:47                   ` David Miller
2008-11-25 11:42                     ` Patrick McHardy

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).