* [PATCH v2 net-next 0/3] netem: add nsec scheduling and slot feature
@ 2017-11-08 21:09 Dave Taht
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
` (2 more replies)
0 siblings, 3 replies; 9+ messages in thread
From: Dave Taht @ 2017-11-08 21:09 UTC (permalink / raw)
To: netdev; +Cc: stephen, Dave Taht
This patch series converts netem away from the old "ticks" interface and
userspace API, and adds support for a new "slot" feature intended to
better emulate bursty MACs such as WiFi and LTE.
Changes since v1:
Always pass new nanosecond APIs to userspace
Dave Taht (3):
netem: convert to qdisc_watchdog_schedule_ns
netem: add uapi to express delay and jitter in nanoseconds
netem: support delivering packets in delayed time slots
include/uapi/linux/pkt_sched.h | 10 +++
net/sched/sch_netem.c | 142 ++++++++++++++++++++++++++++++++---------
2 files changed, 123 insertions(+), 29 deletions(-)
--
2.7.4
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns
2017-11-08 21:09 [PATCH v2 net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
@ 2017-11-08 21:09 ` Dave Taht
2017-11-08 22:20 ` Eric Dumazet
` (2 more replies)
2017-11-08 21:09 ` [PATCH v2 net-next 2/3] netem: add uapi to express delay and jitter in nanoseconds Dave Taht
2017-11-08 21:09 ` [PATCH v2 net-next 3/3] netem: support delivering packets in delayed time slots Dave Taht
2 siblings, 3 replies; 9+ messages in thread
From: Dave Taht @ 2017-11-08 21:09 UTC (permalink / raw)
To: netdev; +Cc: stephen, Dave Taht
Upgrade the internal netem scheduler to use nanoseconds rather than
ticks throughout.
Convert to and from the std "ticks" userspace api automatically,
while allowing for finer grained scheduling to take place.
Signed-off-by: Dave Taht <dave.taht@gmail.com>
---
net/sched/sch_netem.c | 56 +++++++++++++++++++++++++--------------------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index db0228a..5559ad1 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -77,8 +77,8 @@ struct netem_sched_data {
struct qdisc_watchdog watchdog;
- psched_tdiff_t latency;
- psched_tdiff_t jitter;
+ s64 latency;
+ s64 jitter;
u32 loss;
u32 ecn;
@@ -145,7 +145,7 @@ struct netem_sched_data {
* we save skb->tstamp value in skb->cb[] before destroying it.
*/
struct netem_skb_cb {
- psched_time_t time_to_send;
+ u64 time_to_send;
};
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
@@ -305,11 +305,11 @@ static bool loss_event(struct netem_sched_data *q)
* std deviation sigma. Uses table lookup to approximate the desired
* distribution, and a uniformly-distributed pseudo-random source.
*/
-static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
- struct crndstate *state,
- const struct disttable *dist)
+static s64 tabledist(s64 mu, s64 sigma,
+ struct crndstate *state,
+ const struct disttable *dist)
{
- psched_tdiff_t x;
+ s64 x;
long t;
u32 rnd;
@@ -332,10 +332,10 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
-static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
+static s64 packet_len_2_sched_time(unsigned int len,
+ struct netem_sched_data *q)
{
- u64 ticks;
-
+ s64 offset;
len += q->packet_overhead;
if (q->cell_size) {
@@ -345,11 +345,9 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
cells++;
len = cells * (q->cell_size + q->cell_overhead);
}
-
- ticks = (u64)len * NSEC_PER_SEC;
-
- do_div(ticks, q->rate);
- return PSCHED_NS2TICKS(ticks);
+ offset = (s64)len * NSEC_PER_SEC;
+ do_div(offset, q->rate);
+ return offset;
}
static void tfifo_reset(struct Qdisc *sch)
@@ -369,7 +367,7 @@ static void tfifo_reset(struct Qdisc *sch)
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
- psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
+ u64 tnext = netem_skb_cb(nskb)->time_to_send;
struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
while (*p) {
@@ -515,13 +513,13 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (q->gap == 0 || /* not doing reordering */
q->counter < q->gap - 1 || /* inside last reordering gap */
q->reorder < get_crandom(&q->reorder_cor)) {
- psched_time_t now;
- psched_tdiff_t delay;
+ u64 now;
+ s64 delay;
delay = tabledist(q->latency, q->jitter,
&q->delay_cor, q->delay_dist);
- now = psched_get_time();
+ now = ktime_get_ns();
if (q->rate) {
struct netem_skb_cb *last = NULL;
@@ -547,7 +545,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
* from delay.
*/
delay -= last->time_to_send - now;
- delay = max_t(psched_tdiff_t, 0, delay);
+ delay = max_t(s64, 0, delay);
now = last->time_to_send;
}
@@ -562,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
* Do re-ordering by putting one out of N packets at the front
* of the queue.
*/
- cb->time_to_send = psched_get_time();
+ cb->time_to_send = ktime_get_ns();
q->counter = 0;
netem_enqueue_skb_head(&sch->q, skb);
@@ -609,13 +607,13 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
}
p = rb_first(&q->t_root);
if (p) {
- psched_time_t time_to_send;
+ u64 time_to_send;
skb = rb_to_skb(p);
/* if more time remaining? */
time_to_send = netem_skb_cb(skb)->time_to_send;
- if (time_to_send <= psched_get_time()) {
+ if (time_to_send <= ktime_get_ns()) {
rb_erase(p, &q->t_root);
sch->q.qlen--;
@@ -659,7 +657,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (skb)
goto deliver;
}
- qdisc_watchdog_schedule(&q->watchdog, time_to_send);
+ qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
}
if (q->qdisc) {
@@ -888,8 +886,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
sch->limit = qopt->limit;
- q->latency = qopt->latency;
- q->jitter = qopt->jitter;
+ q->latency = PSCHED_TICKS2NS(qopt->latency);
+ q->jitter = PSCHED_TICKS2NS(qopt->jitter);
q->limit = qopt->limit;
q->gap = qopt->gap;
q->counter = 0;
@@ -1011,8 +1009,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_netem_corrupt corrupt;
struct tc_netem_rate rate;
- qopt.latency = q->latency;
- qopt.jitter = q->jitter;
+ qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
+ UINT_MAX);
+ qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
+ UINT_MAX);
qopt.limit = q->limit;
qopt.loss = q->loss;
qopt.gap = q->gap;
--
2.7.4
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH v2 net-next 2/3] netem: add uapi to express delay and jitter in nanoseconds
2017-11-08 21:09 [PATCH v2 net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
@ 2017-11-08 21:09 ` Dave Taht
2017-11-08 22:29 ` Eric Dumazet
2017-11-08 21:09 ` [PATCH v2 net-next 3/3] netem: support delivering packets in delayed time slots Dave Taht
2 siblings, 1 reply; 9+ messages in thread
From: Dave Taht @ 2017-11-08 21:09 UTC (permalink / raw)
To: netdev; +Cc: stephen, Dave Taht
netem userspace has long relied on a horrible /proc/net/psched hack
to translate the current notion of "ticks" to nanoseconds.
Expressing latency and jitter instead, in well defined nanoseconds,
increases the dynamic range of emulated delays and jitter in netem.
It will also ease a transition away from the tick/nsec equivalence,
which in prior versions of netem constrained the maximum expressible
delay to only about 4.3 seconds.
Signed-off-by: Dave Taht <dave.taht@gmail.com>
---
include/uapi/linux/pkt_sched.h | 2 ++
net/sched/sch_netem.c | 14 ++++++++++++++
2 files changed, 16 insertions(+)
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 6a2c5ea..8fe6d18 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -537,6 +537,8 @@ enum {
TCA_NETEM_ECN,
TCA_NETEM_RATE64,
TCA_NETEM_PAD,
+ TCA_NETEM_LATENCY64,
+ TCA_NETEM_JITTER64,
__TCA_NETEM_MAX,
};
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5559ad1..ef63ae4 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -819,6 +819,8 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_LOSS] = { .type = NLA_NESTED },
[TCA_NETEM_ECN] = { .type = NLA_U32 },
[TCA_NETEM_RATE64] = { .type = NLA_U64 },
+ [TCA_NETEM_LATENCY64] = { .type = NLA_S64 },
+ [TCA_NETEM_JITTER64] = { .type = NLA_S64 },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -916,6 +918,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
q->rate = max_t(u64, q->rate,
nla_get_u64(tb[TCA_NETEM_RATE64]));
+ if (tb[TCA_NETEM_LATENCY64])
+ q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
+
+ if (tb[TCA_NETEM_JITTER64])
+ q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
+
if (tb[TCA_NETEM_ECN])
q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
@@ -1020,6 +1028,12 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
goto nla_put_failure;
+ if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
+ goto nla_put_failure;
+
+ if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
+ goto nla_put_failure;
+
cor.delay_corr = q->delay_cor.rho;
cor.loss_corr = q->loss_cor.rho;
cor.dup_corr = q->dup_cor.rho;
--
2.7.4
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH v2 net-next 3/3] netem: support delivering packets in delayed time slots
2017-11-08 21:09 [PATCH v2 net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
2017-11-08 21:09 ` [PATCH v2 net-next 2/3] netem: add uapi to express delay and jitter in nanoseconds Dave Taht
@ 2017-11-08 21:09 ` Dave Taht
2017-11-08 22:40 ` Eric Dumazet
2 siblings, 1 reply; 9+ messages in thread
From: Dave Taht @ 2017-11-08 21:09 UTC (permalink / raw)
To: netdev; +Cc: stephen, Dave Taht
Slotting is a crude approximation of the behaviors of shared media such
as cable, wifi, and LTE, which gather up a bunch of packets within a
varying delay window and deliver them, relative to that, nearly all at
once.
It works within the existing loss, duplication, jitter and delay
parameters of netem. Some amount of inherent latency must be specified,
regardless.
The new "slot" parameter specifies a minimum and maximum delay between
transmission attempts.
The "bytes" and "packets" parameters can be used to limit the amount of
information transferred per slot.
Examples of use:
tc qdisc add dev eth0 root netem delay 200us \
slot 800us 10ms bytes 64k packets 42
A more correct example, using stacked netem instances and a packet limit
to emulate a tail drop wifi queue with slots and variable packet
delivery, with a 200Mbit isochronous underlying rate, and 20ms path
delay:
tc qdisc add dev eth0 root handle 1: netem delay 20ms rate 200mbit \
limit 10000
tc qdisc add dev eth0 parent 1:1 handle 10:1 netem delay 200us \
slot 800us 10ms bytes 64k packets 42 limit 512
Signed-off-by: Dave Taht <dave.taht@gmail.com>
---
include/uapi/linux/pkt_sched.h | 8 +++++
net/sched/sch_netem.c | 76 ++++++++++++++++++++++++++++++++++++++++--
2 files changed, 81 insertions(+), 3 deletions(-)
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 8fe6d18..af3cc2f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -539,6 +539,7 @@ enum {
TCA_NETEM_PAD,
TCA_NETEM_LATENCY64,
TCA_NETEM_JITTER64,
+ TCA_NETEM_SLOT,
__TCA_NETEM_MAX,
};
@@ -576,6 +577,13 @@ struct tc_netem_rate {
__s32 cell_overhead;
};
+struct tc_netem_slot {
+ __s64 min_delay; /* nsec */
+ __s64 max_delay;
+ __s32 max_packets;
+ __s32 max_bytes;
+};
+
enum {
NETEM_LOSS_UNSPEC,
NETEM_LOSS_GI, /* General Intuitive - 4 state model */
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ef63ae4..b697f89 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -135,6 +135,13 @@ struct netem_sched_data {
u32 a5; /* p23 used only in 4-states */
} clg;
+ struct tc_netem_slot slot_config;
+ struct slotstate {
+ u64 slot_next;
+ s32 packets_left;
+ s32 bytes_left;
+ } slot;
+
};
/* Time stamp put into socket buffer control block
@@ -591,6 +598,20 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_SUCCESS;
}
+/* Delay the next round with a new future slot with a
+ * correct number of bytes and packets.
+ */
+
+static void get_slot_next(struct netem_sched_data *q, u64 now)
+{
+ q->slot.slot_next = now + q->slot_config.min_delay +
+ (prandom_u32() *
+ (q->slot_config.max_delay -
+ q->slot_config.min_delay) >> 32);
+ q->slot.packets_left = q->slot_config.max_packets;
+ q->slot.bytes_left = q->slot_config.max_bytes;
+}
+
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -608,14 +629,17 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
p = rb_first(&q->t_root);
if (p) {
u64 time_to_send;
+ u64 now = ktime_get_ns();
skb = rb_to_skb(p);
/* if more time remaining? */
time_to_send = netem_skb_cb(skb)->time_to_send;
- if (time_to_send <= ktime_get_ns()) {
- rb_erase(p, &q->t_root);
+ if (q->slot.slot_next && q->slot.slot_next < time_to_send)
+ get_slot_next(q, now);
+ if (time_to_send <= now && q->slot.slot_next <= now) {
+ rb_erase(p, &q->t_root);
sch->q.qlen--;
qdisc_qstats_backlog_dec(sch, skb);
skb->next = NULL;
@@ -634,6 +658,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
skb->tstamp = 0;
#endif
+ if (q->slot.slot_next) {
+ q->slot.packets_left--;
+ q->slot.bytes_left -= qdisc_pkt_len(skb);
+ if (q->slot.packets_left <= 0 ||
+ q->slot.bytes_left <= 0)
+ get_slot_next(q, now);
+ }
+
if (q->qdisc) {
unsigned int pkt_len = qdisc_pkt_len(skb);
struct sk_buff *to_free = NULL;
@@ -657,7 +689,12 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (skb)
goto deliver;
}
- qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
+
+ if (q->slot.slot_next > now)
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+ q->slot.slot_next);
+ else
+ qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
}
if (q->qdisc) {
@@ -688,6 +725,7 @@ static void dist_free(struct disttable *d)
* Distribution data is a variable size payload containing
* signed 16 bit values.
*/
+
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -718,6 +756,23 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
return 0;
}
+static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
+{
+ const struct tc_netem_slot *c = nla_data(attr);
+
+ q->slot_config = *c;
+ if (q->slot_config.max_packets == 0)
+ q->slot_config.max_packets = INT_MAX;
+ if (q->slot_config.max_bytes == 0)
+ q->slot_config.max_bytes = INT_MAX;
+ q->slot.packets_left = q->slot_config.max_packets;
+ q->slot.bytes_left = q->slot_config.max_bytes;
+ if (q->slot_config.min_delay | q->slot_config.max_delay)
+ q->slot.slot_next = ktime_get_ns();
+ else
+ q->slot.slot_next = 0;
+}
+
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
const struct tc_netem_corr *c = nla_data(attr);
@@ -821,6 +876,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
[TCA_NETEM_RATE64] = { .type = NLA_U64 },
[TCA_NETEM_LATENCY64] = { .type = NLA_S64 },
[TCA_NETEM_JITTER64] = { .type = NLA_S64 },
+ [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -927,6 +983,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_ECN])
q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
+ if (tb[TCA_NETEM_SLOT])
+ get_slot(q, tb[TCA_NETEM_SLOT]);
+
return ret;
}
@@ -1016,6 +1075,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_netem_reorder reorder;
struct tc_netem_corrupt corrupt;
struct tc_netem_rate rate;
+ struct tc_netem_slot slot;
qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
UINT_MAX);
@@ -1070,6 +1130,16 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
if (dump_loss_model(q, skb) != 0)
goto nla_put_failure;
+ if (q->slot_config.min_delay | q->slot_config.max_delay) {
+ slot = q->slot_config;
+ if (slot.max_packets == INT_MAX)
+ slot.max_packets = 0;
+ if (slot.max_bytes == INT_MAX)
+ slot.max_bytes = 0;
+ if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
+ goto nla_put_failure;
+ }
+
return nla_nest_end(skb, nla);
nla_put_failure:
--
2.7.4
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
@ 2017-11-08 22:20 ` Eric Dumazet
2017-11-11 12:08 ` kbuild test robot
2017-11-11 12:28 ` kbuild test robot
2 siblings, 0 replies; 9+ messages in thread
From: Eric Dumazet @ 2017-11-08 22:20 UTC (permalink / raw)
To: Dave Taht; +Cc: netdev, stephen
On Wed, 2017-11-08 at 13:09 -0800, Dave Taht wrote:
> Upgrade the internal netem scheduler to use nanoseconds rather than
> ticks throughout.
>
...
> -static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
> +static s64 packet_len_2_sched_time(unsigned int len,
> + struct netem_sched_data *q)
> {
> - u64 ticks;
> -
> + s64 offset;
> len += q->packet_overhead;
>
> if (q->cell_size) {
> @@ -345,11 +345,9 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
> cells++;
> len = cells * (q->cell_size + q->cell_overhead);
> }
> -
> - ticks = (u64)len * NSEC_PER_SEC;
> -
> - do_div(ticks, q->rate);
> - return PSCHED_NS2TICKS(ticks);
> + offset = (s64)len * NSEC_PER_SEC;
> + do_div(offset, q->rate);
> + return offset;
> }
do_div() first argument being u64, I do not see why you chose
's64 offset'
packet_len_2_sched_time() should return u64, because I do not see how we
could return a negative value, since a packet length is positive.
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2 net-next 2/3] netem: add uapi to express delay and jitter in nanoseconds
2017-11-08 21:09 ` [PATCH v2 net-next 2/3] netem: add uapi to express delay and jitter in nanoseconds Dave Taht
@ 2017-11-08 22:29 ` Eric Dumazet
0 siblings, 0 replies; 9+ messages in thread
From: Eric Dumazet @ 2017-11-08 22:29 UTC (permalink / raw)
To: Dave Taht; +Cc: netdev, stephen
On Wed, 2017-11-08 at 13:09 -0800, Dave Taht wrote:
> netem userspace has long relied on a horrible /proc/net/psched hack
> to translate the current notion of "ticks" to nanoseconds.
>
> Expressing latency and jitter instead, in well defined nanoseconds,
> increases the dynamic range of emulated delays and jitter in netem.
>
> It will also ease a transition where reducing a tick to nsec
> equivalence would constrain the max delay in prior versions of
> netem to only 4.3 seconds.
>
> Signed-off-by: Dave Taht <dave.taht@gmail.com>
> ---
Suggested-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2 net-next 3/3] netem: support delivering packets in delayed time slots
2017-11-08 21:09 ` [PATCH v2 net-next 3/3] netem: support delivering packets in delayed time slots Dave Taht
@ 2017-11-08 22:40 ` Eric Dumazet
0 siblings, 0 replies; 9+ messages in thread
From: Eric Dumazet @ 2017-11-08 22:40 UTC (permalink / raw)
To: Dave Taht; +Cc: netdev, stephen
On Wed, 2017-11-08 at 13:09 -0800, Dave Taht wrote:
> Slotting is a crude approximation of the behaviors of shared media such
> as cable, wifi, and LTE, which gather up a bunch of packets within a
> varying delay window and deliver them, relative to that, nearly all at
> once.
> - qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
> +
> + if (q->slot.slot_next > now)
> + qdisc_watchdog_schedule_ns(&q->watchdog,
> + q->slot.slot_next);
> + else
> + qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
This looks weird.
You might incur an extra timer event in the following case :
now < q->slot.slot_next < time_to_send
I would rather do
qdisc_watchdog_schedule_ns(&q->watchdog,
max(time_to_send, q->slot.slot_next));
So that a single timer expiration is needed.
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
2017-11-08 22:20 ` Eric Dumazet
@ 2017-11-11 12:08 ` kbuild test robot
2017-11-11 12:28 ` kbuild test robot
2 siblings, 0 replies; 9+ messages in thread
From: kbuild test robot @ 2017-11-11 12:08 UTC (permalink / raw)
To: Dave Taht; +Cc: kbuild-all, netdev, stephen, Dave Taht
[-- Attachment #1: Type: text/plain, Size: 2179 bytes --]
Hi Dave,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/Dave-Taht/netem-convert-to-qdisc_watchdog_schedule_ns/20171111-184934
config: xtensa-allyesconfig (attached as .config)
compiler: xtensa-linux-gcc (GCC) 4.9.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=xtensa
All warnings (new ones prefixed by >>):
In file included from ./arch/xtensa/include/generated/asm/div64.h:1:0,
from include/linux/kernel.h:173,
from include/asm-generic/bug.h:16,
from ./arch/xtensa/include/generated/asm/bug.h:1,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from net/sched/sch_netem.c:16:
net/sched/sch_netem.c: In function 'packet_len_2_sched_time':
include/asm-generic/div64.h:208:28: warning: comparison of distinct pointer types lacks a cast
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
^
>> net/sched/sch_netem.c:349:2: note: in expansion of macro 'do_div'
do_div(offset, q->rate);
^
vim +/do_div +349 net/sched/sch_netem.c
334
335 static s64 packet_len_2_sched_time(unsigned int len,
336 struct netem_sched_data *q)
337 {
338 s64 offset;
339 len += q->packet_overhead;
340
341 if (q->cell_size) {
342 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
343
344 if (len > cells * q->cell_size) /* extra cell needed for remainder */
345 cells++;
346 len = cells * (q->cell_size + q->cell_overhead);
347 }
348 offset = (s64)len * NSEC_PER_SEC;
> 349 do_div(offset, q->rate);
350 return offset;
351 }
352
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 51688 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
2017-11-08 22:20 ` Eric Dumazet
2017-11-11 12:08 ` kbuild test robot
@ 2017-11-11 12:28 ` kbuild test robot
2 siblings, 0 replies; 9+ messages in thread
From: kbuild test robot @ 2017-11-11 12:28 UTC (permalink / raw)
To: Dave Taht; +Cc: kbuild-all, netdev, stephen, Dave Taht
[-- Attachment #1: Type: text/plain, Size: 3186 bytes --]
Hi Dave,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on net-next/master]
url: https://github.com/0day-ci/linux/commits/Dave-Taht/netem-convert-to-qdisc_watchdog_schedule_ns/20171111-184934
config: i386-randconfig-i1-201745 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All errors (new ones prefixed by >>):
net/sched/sch_netem.o: In function `netem_enqueue':
>> net/sched/sch_netem.c:323: undefined reference to `__moddi3'
vim +323 net/sched/sch_netem.c
661b7972 stephen hemminger 2011-02-23 302
661b7972 stephen hemminger 2011-02-23 303
^1da177e Linus Torvalds 2005-04-16 304 /* tabledist - return a pseudo-randomly distributed value with mean mu and
^1da177e Linus Torvalds 2005-04-16 305 * std deviation sigma. Uses table lookup to approximate the desired
^1da177e Linus Torvalds 2005-04-16 306 * distribution, and a uniformly-distributed pseudo-random source.
^1da177e Linus Torvalds 2005-04-16 307 */
9d0cec66 Dave Taht 2017-11-08 308 static s64 tabledist(s64 mu, s64 sigma,
b407621c Stephen Hemminger 2007-03-22 309 struct crndstate *state,
b407621c Stephen Hemminger 2007-03-22 310 const struct disttable *dist)
^1da177e Linus Torvalds 2005-04-16 311 {
9d0cec66 Dave Taht 2017-11-08 312 s64 x;
b407621c Stephen Hemminger 2007-03-22 313 long t;
b407621c Stephen Hemminger 2007-03-22 314 u32 rnd;
^1da177e Linus Torvalds 2005-04-16 315
^1da177e Linus Torvalds 2005-04-16 316 if (sigma == 0)
^1da177e Linus Torvalds 2005-04-16 317 return mu;
^1da177e Linus Torvalds 2005-04-16 318
^1da177e Linus Torvalds 2005-04-16 319 rnd = get_crandom(state);
^1da177e Linus Torvalds 2005-04-16 320
^1da177e Linus Torvalds 2005-04-16 321 /* default uniform distribution */
^1da177e Linus Torvalds 2005-04-16 322 if (dist == NULL)
^1da177e Linus Torvalds 2005-04-16 @323 return (rnd % (2*sigma)) - sigma + mu;
^1da177e Linus Torvalds 2005-04-16 324
^1da177e Linus Torvalds 2005-04-16 325 t = dist->table[rnd % dist->size];
^1da177e Linus Torvalds 2005-04-16 326 x = (sigma % NETEM_DIST_SCALE) * t;
^1da177e Linus Torvalds 2005-04-16 327 if (x >= 0)
^1da177e Linus Torvalds 2005-04-16 328 x += NETEM_DIST_SCALE/2;
^1da177e Linus Torvalds 2005-04-16 329 else
^1da177e Linus Torvalds 2005-04-16 330 x -= NETEM_DIST_SCALE/2;
^1da177e Linus Torvalds 2005-04-16 331
^1da177e Linus Torvalds 2005-04-16 332 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
^1da177e Linus Torvalds 2005-04-16 333 }
^1da177e Linus Torvalds 2005-04-16 334
:::::: The code at line 323 was first introduced by commit
:::::: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Linux-2.6.12-rc2
:::::: TO: Linus Torvalds <torvalds@ppc970.osdl.org>
:::::: CC: Linus Torvalds <torvalds@ppc970.osdl.org>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 34923 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2017-11-11 12:17 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-11-08 21:09 [PATCH v2 net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
2017-11-08 21:09 ` [PATCH v2 net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
2017-11-08 22:20 ` Eric Dumazet
2017-11-11 12:08 ` kbuild test robot
2017-11-11 12:28 ` kbuild test robot
2017-11-08 21:09 ` [PATCH v2 net-next 2/3] netem: add uapi to express delay and jitter in nanoseconds Dave Taht
2017-11-08 22:29 ` Eric Dumazet
2017-11-08 21:09 ` [PATCH v2 net-next 3/3] netem: support delivering packets in delayed time slots Dave Taht
2017-11-08 22:40 ` Eric Dumazet
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).