* [PATCH net-next 0/6] netem patches
@ 2011-02-23 23:04 Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 1/6] [PATCH 5/9] netem: cleanup dump code Stephen Hemminger
` (6 more replies)
0 siblings, 7 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
These have been resting in a moldy place for far too long.
The most important is the integration of a better packet
loss model for netem
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 1/6] [PATCH 5/9] netem: cleanup dump code
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
@ 2011-02-23 23:04 ` Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 2/6] netem: use vmalloc for distribution table Stephen Hemminger
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
[-- Attachment #1: netem-dump.patch --]
[-- Type: text/plain, Size: 1115 bytes --]
Use nla_put_nested to update netlink attribute value.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
---
net/sched/sch_netem.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
--- a/net/sched/sch_netem.c 2011-02-23 14:43:09.114302606 -0800
+++ b/net/sched/sch_netem.c 2011-02-23 14:43:27.598650361 -0800
@@ -562,8 +562,7 @@ static void netem_destroy(struct Qdisc *
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
const struct netem_sched_data *q = qdisc_priv(sch);
- unsigned char *b = skb_tail_pointer(skb);
- struct nlattr *nla = (struct nlattr *) b;
+ struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
struct tc_netem_qopt qopt;
struct tc_netem_corr cor;
struct tc_netem_reorder reorder;
@@ -590,12 +589,10 @@ static int netem_dump(struct Qdisc *sch,
corrupt.correlation = q->corrupt_cor.rho;
NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
- nla->nla_len = skb_tail_pointer(skb) - b;
-
- return skb->len;
+ return nla_nest_end(skb, nla);
nla_put_failure:
- nlmsg_trim(skb, b);
+ nlmsg_trim(skb, nla);
return -1;
}
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 2/6] netem: use vmalloc for distribution table
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 1/6] [PATCH 5/9] netem: cleanup dump code Stephen Hemminger
@ 2011-02-23 23:04 ` Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 3/6] netem: define NETEM_DIST_MAX Stephen Hemminger
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
[-- Attachment #1: netem-vmalloc.patch --]
[-- Type: text/plain, Size: 1760 bytes --]
The netem probability table can be large (up to 64K bytes)
which may be too large to allocate in one contiguous chunk.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
--- a/net/sched/sch_netem.c 2011-02-23 14:43:27.000000000 -0800
+++ b/net/sched/sch_netem.c 2011-02-23 14:49:07.228646202 -0800
@@ -308,6 +308,16 @@ static void netem_reset(struct Qdisc *sc
qdisc_watchdog_cancel(&q->watchdog);
}
+static void dist_free(struct disttable *d)
+{
+ if (d) {
+ if (is_vmalloc_addr(d))
+ vfree(d);
+ else
+ kfree(d);
+ }
+}
+
/*
* Distribution data is a variable size payload containing
* signed 16 bit values.
@@ -315,16 +325,20 @@ static void netem_reset(struct Qdisc *sc
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
struct netem_sched_data *q = qdisc_priv(sch);
- unsigned long n = nla_len(attr)/sizeof(__s16);
+ size_t n = nla_len(attr)/sizeof(__s16);
const __s16 *data = nla_data(attr);
spinlock_t *root_lock;
struct disttable *d;
int i;
+ size_t s;
if (n > 65536)
return -EINVAL;
- d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
+ s = sizeof(struct disttable) + n * sizeof(s16);
+ d = kmalloc(s, GFP_KERNEL);
+ if (!d)
+ d = vmalloc(s);
if (!d)
return -ENOMEM;
@@ -335,7 +349,7 @@ static int get_dist_table(struct Qdisc *
root_lock = qdisc_root_sleeping_lock(sch);
spin_lock_bh(root_lock);
- kfree(q->delay_dist);
+ dist_free(q->delay_dist);
q->delay_dist = d;
spin_unlock_bh(root_lock);
return 0;
@@ -556,7 +570,7 @@ static void netem_destroy(struct Qdisc *
qdisc_watchdog_cancel(&q->watchdog);
qdisc_destroy(q->qdisc);
- kfree(q->delay_dist);
+ dist_free(q->delay_dist);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 3/6] netem: define NETEM_DIST_MAX
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 1/6] [PATCH 5/9] netem: cleanup dump code Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 2/6] netem: use vmalloc for distribution table Stephen Hemminger
@ 2011-02-23 23:04 ` Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 4/6] [PATCH] Revert "sch_netem: Remove classful functionality" Stephen Hemminger
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
[-- Attachment #1: netem-maxsize.patch --]
[-- Type: text/plain, Size: 930 bytes --]
Rather than magic constant in code, expose the maximum size of
packet distribution table in API. In iproute2, q_netem defines
MAX_DIST as 16K already.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
---
include/linux/pkt_sched.h | 1 +
net/sched/sch_netem.c | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
--- a/include/linux/pkt_sched.h 2011-02-23 14:43:08.838297372 -0800
+++ b/include/linux/pkt_sched.h 2011-02-23 14:50:10.329760558 -0800
@@ -495,6 +495,7 @@ struct tc_netem_corrupt {
};
#define NETEM_DIST_SCALE 8192
+#define NETEM_DIST_MAX 16384
/* DRR */
--- a/net/sched/sch_netem.c 2011-02-23 14:50:09.445745344 -0800
+++ b/net/sched/sch_netem.c 2011-02-23 14:50:10.329760558 -0800
@@ -332,7 +332,7 @@ static int get_dist_table(struct Qdisc *
int i;
size_t s;
- if (n > 65536)
+ if (n > NETEM_DIST_MAX)
return -EINVAL;
s = sizeof(struct disttable) + n * sizeof(s16);
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 4/6] [PATCH] Revert "sch_netem: Remove classful functionality"
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
` (2 preceding siblings ...)
2011-02-23 23:04 ` [PATCH net-next 3/6] netem: define NETEM_DIST_MAX Stephen Hemminger
@ 2011-02-23 23:04 ` Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 5/6] netem: revised correlated loss generator Stephen Hemminger
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
[-- Attachment #1: netem-classful.patch --]
[-- Type: text/plain, Size: 3274 bytes --]
Many users have wanted the old functionality that was lost
to be able to use pfifo as inner qdisc for netem. The reason that
netem could not be classful with the older API was because of the
limitations of the old dequeue/requeue interface; now that qdisc API has
a peek function, there is no longer a problem with using any
inner qdisc's.
This reverts commit 02201464119334690fe209849843881b8e9cfa9f.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
---
net/sched/sch_netem.c | 87 +++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 79 insertions(+), 8 deletions(-)
--- a/net/sched/sch_netem.c 2011-02-23 14:50:10.329760558 -0800
+++ b/net/sched/sch_netem.c 2011-02-23 14:50:19.933925120 -0800
@@ -238,14 +238,15 @@ static int netem_enqueue(struct sk_buff
ret = NET_XMIT_SUCCESS;
}
- if (likely(ret == NET_XMIT_SUCCESS)) {
- sch->q.qlen++;
- } else if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
+ if (ret != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ return ret;
+ }
}
- pr_debug("netem: enqueue ret %d\n", ret);
- return ret;
+ sch->q.qlen++;
+ return NET_XMIT_SUCCESS;
}
static unsigned int netem_drop(struct Qdisc *sch)
@@ -287,9 +288,10 @@ static struct sk_buff *netem_dequeue(str
if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
skb->tstamp.tv64 = 0;
#endif
- pr_debug("netem_dequeue: return skb=%p\n", skb);
- qdisc_bstats_update(sch, skb);
+
sch->q.qlen--;
+ qdisc_unthrottled(sch);
+ qdisc_bstats_update(sch, skb);
return skb;
}
@@ -610,8 +612,77 @@ nla_put_failure:
return -1;
}
+static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+
+ if (cl != 1) /* only one class */
+ return -ENOENT;
+
+ tcm->tcm_handle |= TC_H_MIN(1);
+ tcm->tcm_info = q->qdisc->handle;
+
+ return 0;
+}
+
+static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+
+ if (new == NULL)
+ new = &noop_qdisc;
+
+ sch_tree_lock(sch);
+ *old = q->qdisc;
+ q->qdisc = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
+ return 0;
+}
+
+static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ return q->qdisc;
+}
+
+static unsigned long netem_get(struct Qdisc *sch, u32 classid)
+{
+ return 1;
+}
+
+static void netem_put(struct Qdisc *sch, unsigned long arg)
+{
+}
+
+static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
+{
+ if (!walker->stop) {
+ if (walker->count >= walker->skip)
+ if (walker->fn(sch, 1, walker) < 0) {
+ walker->stop = 1;
+ return;
+ }
+ walker->count++;
+ }
+}
+
+static const struct Qdisc_class_ops netem_class_ops = {
+ .graft = netem_graft,
+ .leaf = netem_leaf,
+ .get = netem_get,
+ .put = netem_put,
+ .walk = netem_walk,
+ .dump = netem_dump_class,
+};
+
static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.id = "netem",
+ .cl_ops = &netem_class_ops,
.priv_size = sizeof(struct netem_sched_data),
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 5/6] netem: revised correlated loss generator
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
` (3 preceding siblings ...)
2011-02-23 23:04 ` [PATCH net-next 4/6] [PATCH] Revert "sch_netem: Remove classful functionality" Stephen Hemminger
@ 2011-02-23 23:04 ` Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 6/6] netem: update version and cleanup Stephen Hemminger
2011-02-25 6:14 ` [PATCH net-next 0/6] netem patches David Miller
6 siblings, 0 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
[-- Attachment #1: netem-loss-model.patch --]
[-- Type: text/plain, Size: 10589 bytes --]
This is a patch that originated with Stefano Salsano and Fabio Ludovici.
It provides several alternative loss models for use with netem.
This patch adds two state machine based loss models.
See: http://netgroup.uniroma2.it/twiki/bin/view.cgi/Main/NetemCLG
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
---
include/linux/pkt_sched.h | 26 ++++
net/sched/sch_netem.c | 274 +++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 296 insertions(+), 4 deletions(-)
--- a/net/sched/sch_netem.c 2011-02-23 14:53:08.144607422 -0800
+++ b/net/sched/sch_netem.c 2011-02-23 15:01:29.159338952 -0800
@@ -47,6 +47,20 @@
layering other disciplines. It does not need to do bandwidth
control either since that can be handled by using token
bucket or other rate control.
+
+ Correlated Loss Generator models
+
+ Added generation of correlated loss according to the
+ "Gilbert-Elliot" model, a 4-state markov model.
+
+ References:
+ [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
+ [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
+ and intuitive loss model for packet networks and its implementation
+ in the Netem module in the Linux kernel", available in [1]
+
+ Authors: Stefano Salsano <stefano.salsano at uniroma2.it
+ Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
struct netem_sched_data {
@@ -73,6 +87,26 @@ struct netem_sched_data {
u32 size;
s16 table[0];
} *delay_dist;
+
+ enum {
+ CLG_RANDOM,
+ CLG_4_STATES,
+ CLG_GILB_ELL,
+ } loss_model;
+
+ /* Correlated Loss Generation models */
+ struct clgstate {
+ /* state of the Markov chain */
+ u8 state;
+
+ /* 4-states and Gilbert-Elliot models */
+ u32 a1; /* p13 for 4-states or p for GE */
+ u32 a2; /* p31 for 4-states or r for GE */
+ u32 a3; /* p32 for 4-states or h for GE */
+ u32 a4; /* p14 for 4-states or 1-k for GE */
+ u32 a5; /* p23 used only in 4-states */
+ } clg;
+
};
/* Time stamp put into socket buffer control block */
@@ -115,6 +149,122 @@ static u32 get_crandom(struct crndstate
return answer;
}
+/* loss_4state - 4-state model loss generator
+ * Generates losses according to the 4-state Markov chain adopted in
+ * the GI (General and Intuitive) loss model.
+ */
+static bool loss_4state(struct netem_sched_data *q)
+{
+ struct clgstate *clg = &q->clg;
+ u32 rnd = net_random();
+
+ /*
+ * Makes a comparision between rnd and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state and if the next packet has to be transmitted or lost.
+ * The four states correspond to:
+ * 1 => successfully transmitted packets within a gap period
+ * 4 => isolated losses within a gap period
+ * 3 => lost packets within a burst period
+ * 2 => successfully transmitted packets within a burst period
+ */
+ switch (clg->state) {
+ case 1:
+ if (rnd < clg->a4) {
+ clg->state = 4;
+ return true;
+ } else if (clg->a4 < rnd && rnd < clg->a1) {
+ clg->state = 3;
+ return true;
+ } else if (clg->a1 < rnd)
+ clg->state = 1;
+
+ break;
+ case 2:
+ if (rnd < clg->a5) {
+ clg->state = 3;
+ return true;
+ } else
+ clg->state = 2;
+
+ break;
+ case 3:
+ if (rnd < clg->a3)
+ clg->state = 2;
+ else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
+ clg->state = 1;
+ return true;
+ } else if (clg->a2 + clg->a3 < rnd) {
+ clg->state = 3;
+ return true;
+ }
+ break;
+ case 4:
+ clg->state = 1;
+ break;
+ }
+
+ return false;
+}
+
+/* loss_gilb_ell - Gilbert-Elliot model loss generator
+ * Generates losses according to the Gilbert-Elliot loss model or
+ * its special cases (Gilbert or Simple Gilbert)
+ *
+ * Makes a comparision between random number and the transition
+ * probabilities outgoing from the current state, then decides the
+ * next state. A second random number is extracted and the comparision
+ * with the loss probability of the current state decides if the next
+ * packet will be transmitted or lost.
+ */
+static bool loss_gilb_ell(struct netem_sched_data *q)
+{
+ struct clgstate *clg = &q->clg;
+
+ switch (clg->state) {
+ case 1:
+ if (net_random() < clg->a1)
+ clg->state = 2;
+ if (net_random() < clg->a4)
+ return true;
+ case 2:
+ if (net_random() < clg->a2)
+ clg->state = 1;
+ if (clg->a3 > net_random())
+ return true;
+ }
+
+ return false;
+}
+
+static bool loss_event(struct netem_sched_data *q)
+{
+ switch (q->loss_model) {
+ case CLG_RANDOM:
+ /* Random packet drop 0 => none, ~0 => all */
+ return q->loss && q->loss >= get_crandom(&q->loss_cor);
+
+ case CLG_4_STATES:
+ /* 4state loss model algorithm (used also for GI model)
+ * Extracts a value from the markov 4 state loss generator,
+ * if it is 1 drops a packet and if needed writes the event in
+ * the kernel logs
+ */
+ return loss_4state(q);
+
+ case CLG_GILB_ELL:
+ /* Gilbert-Elliot loss model algorithm
+ * Extracts a value from the Gilbert-Elliot loss generator,
+ * if it is 1 drops a packet and if needed writes the event in
+ * the kernel logs
+ */
+ return loss_gilb_ell(q);
+ }
+
+ return false; /* not reached */
+}
+
+
/* tabledist - return a pseudo-randomly distributed value with mean mu and
* std deviation sigma. Uses table lookup to approximate the desired
* distribution, and a uniformly-distributed pseudo-random source.
@@ -167,8 +317,8 @@ static int netem_enqueue(struct sk_buff
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
- /* Random packet drop 0 => none, ~0 => all */
- if (q->loss && q->loss >= get_crandom(&q->loss_cor))
+ /* Drop packet? */
+ if (loss_event(q))
--count;
if (count == 0) {
@@ -385,10 +535,66 @@ static void get_corrupt(struct Qdisc *sc
init_crandom(&q->corrupt_cor, r->correlation);
}
+static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+{
+ struct netem_sched_data *q = qdisc_priv(sch);
+ const struct nlattr *la;
+ int rem;
+
+ nla_for_each_nested(la, attr, rem) {
+ u16 type = nla_type(la);
+
+ switch(type) {
+ case NETEM_LOSS_GI: {
+ const struct tc_netem_gimodel *gi = nla_data(la);
+
+ if (nla_len(la) != sizeof(struct tc_netem_gimodel)) {
+ pr_info("netem: incorrect gi model size\n");
+ return -EINVAL;
+ }
+
+ q->loss_model = CLG_4_STATES;
+
+ q->clg.state = 1;
+ q->clg.a1 = gi->p13;
+ q->clg.a2 = gi->p31;
+ q->clg.a3 = gi->p32;
+ q->clg.a4 = gi->p14;
+ q->clg.a5 = gi->p23;
+ break;
+ }
+
+ case NETEM_LOSS_GE: {
+ const struct tc_netem_gemodel *ge = nla_data(la);
+
+ if (nla_len(la) != sizeof(struct tc_netem_gemodel)) {
+ pr_info("netem: incorrect gi model size\n");
+ return -EINVAL;
+ }
+
+ q->loss_model = CLG_GILB_ELL;
+ q->clg.state = 1;
+ q->clg.a1 = ge->p;
+ q->clg.a2 = ge->r;
+ q->clg.a3 = ge->h;
+ q->clg.a4 = ge->k1;
+ break;
+ }
+
+ default:
+ pr_info("netem: unknown loss type %u\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) },
[TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) },
[TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
+ [TCA_NETEM_LOSS] = { .type = NLA_NESTED },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -396,11 +602,15 @@ static int parse_attr(struct nlattr *tb[
{
int nested_len = nla_len(nla) - NLA_ALIGN(len);
- if (nested_len < 0)
+ if (nested_len < 0) {
+ pr_info("netem: invalid attributes len %d\n", nested_len);
return -EINVAL;
+ }
+
if (nested_len >= nla_attr_size(0))
return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
nested_len, policy);
+
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
return 0;
}
@@ -456,7 +666,11 @@ static int netem_change(struct Qdisc *sc
if (tb[TCA_NETEM_CORRUPT])
get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
- return 0;
+ q->loss_model = CLG_RANDOM;
+ if (tb[TCA_NETEM_LOSS])
+ ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
+
+ return ret;
}
/*
@@ -551,6 +765,7 @@ static int netem_init(struct Qdisc *sch,
qdisc_watchdog_init(&q->watchdog, sch);
+ q->loss_model = CLG_RANDOM;
q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 1));
if (!q->qdisc) {
@@ -575,6 +790,54 @@ static void netem_destroy(struct Qdisc *
dist_free(q->delay_dist);
}
+static int dump_loss_model(const struct netem_sched_data *q,
+ struct sk_buff *skb)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, TCA_NETEM_LOSS);
+ if (nest == NULL)
+ goto nla_put_failure;
+
+ switch (q->loss_model) {
+ case CLG_RANDOM:
+ /* legacy loss model */
+ nla_nest_cancel(skb, nest);
+ return 0; /* no data */
+
+ case CLG_4_STATES: {
+ struct tc_netem_gimodel gi = {
+ .p13 = q->clg.a1,
+ .p31 = q->clg.a2,
+ .p32 = q->clg.a3,
+ .p14 = q->clg.a4,
+ .p23 = q->clg.a5,
+ };
+
+ NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+ break;
+ }
+ case CLG_GILB_ELL: {
+ struct tc_netem_gemodel ge = {
+ .p = q->clg.a1,
+ .r = q->clg.a2,
+ .h = q->clg.a3,
+ .k1 = q->clg.a4,
+ };
+
+ NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+ break;
+ }
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
+}
+
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
const struct netem_sched_data *q = qdisc_priv(sch);
@@ -605,6 +868,9 @@ static int netem_dump(struct Qdisc *sch,
corrupt.correlation = q->corrupt_cor.rho;
NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+ if (dump_loss_model(q, skb) != 0)
+ goto nla_put_failure;
+
return nla_nest_end(skb, nla);
nla_put_failure:
--- a/include/linux/pkt_sched.h 2011-02-23 14:53:08.164607720 -0800
+++ b/include/linux/pkt_sched.h 2011-02-23 14:58:19.532718370 -0800
@@ -464,6 +464,7 @@ enum {
TCA_NETEM_DELAY_DIST,
TCA_NETEM_REORDER,
TCA_NETEM_CORRUPT,
+ TCA_NETEM_LOSS,
__TCA_NETEM_MAX,
};
@@ -494,6 +495,31 @@ struct tc_netem_corrupt {
__u32 correlation;
};
+enum {
+ NETEM_LOSS_UNSPEC,
+ NETEM_LOSS_GI, /* General Intuitive - 4 state model */
+ NETEM_LOSS_GE, /* Gilbert Elliot models */
+ __NETEM_LOSS_MAX
+};
+#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
+
+/* State transition probablities for 4 state model */
+struct tc_netem_gimodel {
+ __u32 p13;
+ __u32 p31;
+ __u32 p32;
+ __u32 p14;
+ __u32 p23;
+};
+
+/* Gilbert-Elliot models */
+struct tc_netem_gemodel {
+ __u32 p;
+ __u32 r;
+ __u32 h;
+ __u32 k1;
+};
+
#define NETEM_DIST_SCALE 8192
#define NETEM_DIST_MAX 16384
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 6/6] netem: update version and cleanup
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
` (4 preceding siblings ...)
2011-02-23 23:04 ` [PATCH net-next 5/6] netem: revised correlated loss generator Stephen Hemminger
@ 2011-02-23 23:04 ` Stephen Hemminger
2011-02-25 6:14 ` [PATCH net-next 0/6] netem patches David Miller
6 siblings, 0 replies; 8+ messages in thread
From: Stephen Hemminger @ 2011-02-23 23:04 UTC (permalink / raw)
To: David S. Miller; +Cc: netdev
[-- Attachment #1: netem-update.patch --]
[-- Type: text/plain, Size: 1532 bytes --]
Get rid of debug messages that are not useful, and enable
the log messages in case of error.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
---
net/sched/sch_netem.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
--- a/net/sched/sch_netem.c 2011-02-23 15:01:29.159338952 -0800
+++ b/net/sched/sch_netem.c 2011-02-23 15:02:00.451849096 -0800
@@ -24,7 +24,7 @@
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#define VERSION "1.2"
+#define VERSION "1.3"
/* Network Emulation Queuing algorithm.
====================================
@@ -311,8 +311,6 @@ static int netem_enqueue(struct sk_buff
int ret;
int count = 1;
- pr_debug("netem_enqueue skb=%p\n", skb);
-
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
@@ -633,7 +631,7 @@ static int netem_change(struct Qdisc *sc
ret = fifo_set_limit(q->qdisc, qopt->limit);
if (ret) {
- pr_debug("netem: can't set fifo limit\n");
+ pr_info("netem: can't set fifo limit\n");
return ret;
}
@@ -769,13 +767,13 @@ static int netem_init(struct Qdisc *sch,
q->qdisc = qdisc_create_dflt(sch->dev_queue, &tfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 1));
if (!q->qdisc) {
- pr_debug("netem: qdisc create failed\n");
+ pr_notice("netem: qdisc create tfifo qdisc failed\n");
return -ENOMEM;
}
ret = netem_change(sch, opt);
if (ret) {
- pr_debug("netem: change failed\n");
+ pr_info("netem: change failed\n");
qdisc_destroy(q->qdisc);
}
return ret;
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH net-next 0/6] netem patches
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
` (5 preceding siblings ...)
2011-02-23 23:04 ` [PATCH net-next 6/6] netem: update version and cleanup Stephen Hemminger
@ 2011-02-25 6:14 ` David Miller
6 siblings, 0 replies; 8+ messages in thread
From: David Miller @ 2011-02-25 6:14 UTC (permalink / raw)
To: shemminger; +Cc: netdev
From: Stephen Hemminger <shemminger@vyatta.com>
Date: Wed, 23 Feb 2011 15:04:16 -0800
> These have been resting in a moldy place for far too long.
> The most important is the integration of a better packet
> loss model for netem
All applied, thanks Stephen.
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2011-02-25 6:14 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-02-23 23:04 [PATCH net-next 0/6] netem patches Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 1/6] [PATCH 5/9] netem: cleanup dump code Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 2/6] netem: use vmalloc for distribution table Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 3/6] netem: define NETEM_DIST_MAX Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 4/6] [PATCH] Revert "sch_netem: Remove classful functionality" Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 5/6] netem: revised correlated loss generator Stephen Hemminger
2011-02-23 23:04 ` [PATCH net-next 6/6] netem: update version and cleanup Stephen Hemminger
2011-02-25 6:14 ` [PATCH net-next 0/6] netem patches David Miller
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).