* [NET_SCHED 01/10]: Use ktime as clocksource
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 02/10]: Add hrtimer based qdisc watchdog Patrick McHardy
` (9 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: Use ktime as clocksource
Get rid of the manual clock source selection mess and use ktime. Also
use a scalar representation, which allows us to clean up pkt_sched.h a bit
more and results in fewer ktime_to_ns() calls in most cases.
The PSCHED_US2JIFFIE/PSCHED_JIFFIE2US macros are implemented quite
inefficiently by this patch; following patches will convert all qdiscs
to hrtimers and get rid of them entirely.
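To make the new representation concrete, here is a small userspace illustration
of the conversion macros introduced below (a sketch only: the macro bodies are
copied from the patch, while NSEC_PER_SEC, the stdint/stdio includes and the
printf scaffolding are added just for this example):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC    1000000000LL
    /* psched time is now kept in units of 2^10 ns = 1.024us, so converting
     * to/from nanoseconds is a shift instead of a 64 bit divide by 1000 */
    #define PSCHED_US2NS(x)         ((int64_t)(x) << 10)
    #define PSCHED_NS2US(x)         ((x) >> 10)
    #define PSCHED_TICKS_PER_SEC    PSCHED_NS2US(NSEC_PER_SEC)

    int main(void)
    {
            /* one tick is 1024ns, one second is 976562 ticks (not 1000000) */
            printf("tick = %lld ns, ticks/sec = %lld\n",
                   (long long)PSCHED_US2NS(1), (long long)PSCHED_TICKS_PER_SEC);
            return 0;
    }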
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit e400fa6d9e7c13d675b96958a967e747148e9b70
tree 14d5c4061e84d55e5c7633ee84c9eb098adbe709
parent c10208186a58d1149805d8c64a2846d490fce2b5
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 04:44:18 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 04:44:18 +0100
include/net/pkt_sched.h | 169 ++++-------------------------------------------
kernel/hrtimer.c | 1
net/sched/Kconfig | 56 ----------------
net/sched/sch_api.c | 77 +--------------------
net/sched/sch_hfsc.c | 31 +--------
5 files changed, 19 insertions(+), 315 deletions(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f6afee7..1c12afd 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -2,6 +2,7 @@ #ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H
#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <net/sch_generic.h>
struct qdisc_walker
@@ -37,176 +38,32 @@ static inline void *qdisc_priv(struct Qd
The things are not so bad, because we may use artifical
clock evaluated by integration of network data flow
in the most critical places.
-
- Note: we do not use fastgettimeofday.
- The reason is that, when it is not the same thing as
- gettimeofday, it returns invalid timestamp, which is
- not updated, when net_bh is active.
*/
-/* General note about internal clock.
-
- Any clock source returns time intervals, measured in units
- close to 1usec. With source CONFIG_NET_SCH_CLK_GETTIMEOFDAY it is precisely
- microseconds, otherwise something close but different chosen to minimize
- arithmetic cost. Ratio usec/internal untis in form nominator/denominator
- may be read from /proc/net/psched.
- */
-
-
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-
-typedef struct timeval psched_time_t;
-typedef long psched_tdiff_t;
-
-#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
-#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs)
-#define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay)
-
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-
typedef u64 psched_time_t;
typedef long psched_tdiff_t;
-#ifdef CONFIG_NET_SCH_CLK_JIFFIES
-
-#if HZ < 96
-#define PSCHED_JSCALE 14
-#elif HZ >= 96 && HZ < 192
-#define PSCHED_JSCALE 13
-#elif HZ >= 192 && HZ < 384
-#define PSCHED_JSCALE 12
-#elif HZ >= 384 && HZ < 768
-#define PSCHED_JSCALE 11
-#elif HZ >= 768
-#define PSCHED_JSCALE 10
-#endif
-
-#define PSCHED_GET_TIME(stamp) ((stamp) = (get_jiffies_64()<<PSCHED_JSCALE))
-#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
-#define PSCHED_JIFFIE2US(delay) ((delay)<<PSCHED_JSCALE)
-
-#endif /* CONFIG_NET_SCH_CLK_JIFFIES */
-#ifdef CONFIG_NET_SCH_CLK_CPU
-#include <asm/timex.h>
-
-extern psched_tdiff_t psched_clock_per_hz;
-extern int psched_clock_scale;
-extern psched_time_t psched_time_base;
-extern cycles_t psched_time_mark;
-
-#define PSCHED_GET_TIME(stamp) \
-do { \
- cycles_t cur = get_cycles(); \
- if (sizeof(cycles_t) == sizeof(u32)) { \
- if (cur <= psched_time_mark) \
- psched_time_base += 0x100000000ULL; \
- psched_time_mark = cur; \
- (stamp) = (psched_time_base + cur)>>psched_clock_scale; \
- } else { \
- (stamp) = cur>>psched_clock_scale; \
- } \
-} while (0)
-#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
-#define PSCHED_JIFFIE2US(delay) ((delay)*psched_clock_per_hz)
-
-#endif /* CONFIG_NET_SCH_CLK_CPU */
-
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-#define PSCHED_TDIFF(tv1, tv2) \
-({ \
- int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
- int __delta = (tv1).tv_usec - (tv2).tv_usec; \
- if (__delta_sec) { \
- switch (__delta_sec) { \
- default: \
- __delta = 0; \
- case 2: \
- __delta += USEC_PER_SEC; \
- case 1: \
- __delta += USEC_PER_SEC; \
- } \
- } \
- __delta; \
-})
-
-static inline int
-psched_tod_diff(int delta_sec, int bound)
-{
- int delta;
-
- if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1)
- return bound;
- delta = delta_sec * USEC_PER_SEC;
- if (delta > bound || delta < 0)
- delta = bound;
- return delta;
-}
-
-#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
-({ \
- int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
- int __delta = (tv1).tv_usec - (tv2).tv_usec; \
- switch (__delta_sec) { \
- default: \
- __delta = psched_tod_diff(__delta_sec, bound); break; \
- case 2: \
- __delta += USEC_PER_SEC; \
- case 1: \
- __delta += USEC_PER_SEC; \
- case 0: \
- if (__delta > bound || __delta < 0) \
- __delta = bound; \
- } \
- __delta; \
-})
-
-#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \
- (tv1).tv_sec <= (tv2).tv_sec) || \
- (tv1).tv_sec < (tv2).tv_sec)
-
-#define PSCHED_TADD2(tv, delta, tv_res) \
-({ \
- int __delta = (tv).tv_usec + (delta); \
- (tv_res).tv_sec = (tv).tv_sec; \
- while (__delta >= USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
- (tv_res).tv_usec = __delta; \
-})
-
-#define PSCHED_TADD(tv, delta) \
-({ \
- (tv).tv_usec += (delta); \
- while ((tv).tv_usec >= USEC_PER_SEC) { (tv).tv_sec++; \
- (tv).tv_usec -= USEC_PER_SEC; } \
-})
-
-/* Set/check that time is in the "past perfect";
- it depends on concrete representation of system time
- */
-
-#define PSCHED_SET_PASTPERFECT(t) ((t).tv_sec = 0)
-#define PSCHED_IS_PASTPERFECT(t) ((t).tv_sec == 0)
+/* Avoid doing 64 bit divide by 1000 */
+#define PSCHED_US2NS(x) ((s64)(x) << 10)
+#define PSCHED_NS2US(x) ((x) >> 10)
-#define PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
+#define PSCHED_TICKS_PER_SEC PSCHED_NS2US(NSEC_PER_SEC)
+#define PSCHED_GET_TIME(stamp) \
+ ((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get())))
-#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
+#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(PSCHED_US2NS((usecs)) / NSEC_PER_USEC)
+#define PSCHED_JIFFIE2US(delay) PSCHED_NS2US(jiffies_to_usecs((delay)) * NSEC_PER_USEC)
-#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
+#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
- min_t(long long, (tv1) - (tv2), bound)
-
-
-#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
+ min_t(long long, (tv1) - (tv2), bound)
+#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta))
-#define PSCHED_TADD(tv, delta) ((tv) += (delta))
+#define PSCHED_TADD(tv, delta) ((tv) += (delta))
#define PSCHED_SET_PASTPERFECT(t) ((t) = 0)
#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)
-#endif /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
-
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ec4cb9f..8eda12c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -59,6 +59,7 @@ ktime_t ktime_get(void)
return timespec_to_ktime(now);
}
+EXPORT_SYMBOL_GPL(ktime_get);
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f4544dd..475df84 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -46,62 +46,6 @@ config NET_SCH_FIFO
if NET_SCHED
-choice
- prompt "Packet scheduler clock source"
- default NET_SCH_CLK_GETTIMEOFDAY
- ---help---
- Packet schedulers need a monotonic clock that increments at a static
- rate. The kernel provides several suitable interfaces, each with
- different properties:
-
- - high resolution (us or better)
- - fast to read (minimal locking, no i/o access)
- - synchronized on all processors
- - handles cpu clock frequency changes
-
- but nothing provides all of the above.
-
-config NET_SCH_CLK_JIFFIES
- bool "Timer interrupt"
- ---help---
- Say Y here if you want to use the timer interrupt (jiffies) as clock
- source. This clock source is fast, synchronized on all processors and
- handles cpu clock frequency changes, but its resolution is too low
- for accurate shaping except at very low speed.
-
-config NET_SCH_CLK_GETTIMEOFDAY
- bool "gettimeofday"
- ---help---
- Say Y here if you want to use gettimeofday as clock source. This clock
- source has high resolution, is synchronized on all processors and
- handles cpu clock frequency changes, but it is slow.
-
- Choose this if you need a high resolution clock source but can't use
- the CPU's cycle counter.
-
-# don't allow on SMP x86 because they can have unsynchronized TSCs.
-# gettimeofday is a good alternative
-config NET_SCH_CLK_CPU
- bool "CPU cycle counter"
- depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64
- ---help---
- Say Y here if you want to use the CPU's cycle counter as clock source.
- This is a cheap and high resolution clock source, but on some
- architectures it is not synchronized on all processors and doesn't
- handle cpu clock frequency changes.
-
- The useable cycle counters are:
-
- x86/x86_64 - Timestamp Counter
- alpha - Cycle Counter
- sparc64 - %ticks register
- ppc64 - Time base
- ia64 - Interval Time Counter
-
- Choose this if your CPU's cycle counter is working properly.
-
-endchoice
-
comment "Queueing/Scheduling"
config NET_SCH_CBQ
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4a927a5..d71bf79 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1175,15 +1175,12 @@ #endif
return -1;
}
-static int psched_us_per_tick = 1;
-static int psched_tick_per_us = 1;
-
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%08x %08x %08x %08x\n",
- psched_tick_per_us, psched_us_per_tick,
- 1000000, HZ);
+ (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
+ 1000000, HZ);
return 0;
}
@@ -1202,80 +1199,10 @@ static const struct file_operations psch
};
#endif
-#ifdef CONFIG_NET_SCH_CLK_CPU
-psched_tdiff_t psched_clock_per_hz;
-int psched_clock_scale;
-EXPORT_SYMBOL(psched_clock_per_hz);
-EXPORT_SYMBOL(psched_clock_scale);
-
-psched_time_t psched_time_base;
-cycles_t psched_time_mark;
-EXPORT_SYMBOL(psched_time_mark);
-EXPORT_SYMBOL(psched_time_base);
-
-/*
- * Periodically adjust psched_time_base to avoid overflow
- * with 32-bit get_cycles(). Safe up to 4GHz CPU.
- */
-static void psched_tick(unsigned long);
-static DEFINE_TIMER(psched_timer, psched_tick, 0, 0);
-
-static void psched_tick(unsigned long dummy)
-{
- if (sizeof(cycles_t) == sizeof(u32)) {
- psched_time_t dummy_stamp;
- PSCHED_GET_TIME(dummy_stamp);
- psched_timer.expires = jiffies + 1*HZ;
- add_timer(&psched_timer);
- }
-}
-
-int __init psched_calibrate_clock(void)
-{
- psched_time_t stamp, stamp1;
- struct timeval tv, tv1;
- psched_tdiff_t delay;
- long rdelay;
- unsigned long stop;
-
- psched_tick(0);
- stop = jiffies + HZ/10;
- PSCHED_GET_TIME(stamp);
- do_gettimeofday(&tv);
- while (time_before(jiffies, stop)) {
- barrier();
- cpu_relax();
- }
- PSCHED_GET_TIME(stamp1);
- do_gettimeofday(&tv1);
-
- delay = PSCHED_TDIFF(stamp1, stamp);
- rdelay = tv1.tv_usec - tv.tv_usec;
- rdelay += (tv1.tv_sec - tv.tv_sec)*1000000;
- if (rdelay > delay)
- return -1;
- delay /= rdelay;
- psched_tick_per_us = delay;
- while ((delay>>=1) != 0)
- psched_clock_scale++;
- psched_us_per_tick = 1<<psched_clock_scale;
- psched_clock_per_hz = (psched_tick_per_us*(1000000/HZ))>>psched_clock_scale;
- return 0;
-}
-#endif
-
static int __init pktsched_init(void)
{
struct rtnetlink_link *link_p;
-#ifdef CONFIG_NET_SCH_CLK_CPU
- if (psched_calibrate_clock() < 0)
- return -1;
-#elif defined(CONFIG_NET_SCH_CLK_JIFFIES)
- psched_tick_per_us = HZ<<PSCHED_JSCALE;
- psched_us_per_tick = 1000000;
-#endif
-
link_p = rtnetlink_links[PF_UNSPEC];
/* Setup rtnetlink links. It is made here to avoid
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 396deb7..09cf6e4 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -195,20 +195,6 @@ struct hfsc_sched
struct timer_list wd_timer; /* watchdog timer */
};
-/*
- * macros
- */
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-#include <linux/time.h>
-#undef PSCHED_GET_TIME
-#define PSCHED_GET_TIME(stamp) \
-do { \
- struct timeval tv; \
- do_gettimeofday(&tv); \
- (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \
-} while (0)
-#endif
-
#define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
@@ -394,28 +380,17 @@ cftree_update(struct hfsc_class *cl)
* ism: (psched_us/byte) << ISM_SHIFT
* dx: psched_us
*
- * Clock source resolution (CONFIG_NET_SCH_CLK_*)
- * JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
- * CPU: resolution is between 0.5us and 1us.
- * GETTIMEOFDAY: resolution is exactly 1us.
+ * The clock source resolution with ktime is 1.024us.
*
* sm and ism are scaled in order to keep effective digits.
* SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
* digits in decimal using the following table.
*
- * Note: We can afford the additional accuracy (altq hfsc keeps at most
- * 3 effective digits) thanks to the fact that linux clock is bounded
- * much more tightly.
- *
* bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps
* ------------+-------------------------------------------------------
- * bytes/0.5us 6.25e-3 62.5e-3 625e-3 6250e-e 62500e-3
- * bytes/us 12.5e-3 125e-3 1250e-3 12500e-3 125000e-3
- * bytes/1.27us 15.875e-3 158.75e-3 1587.5e-3 15875e-3 158750e-3
+ * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3
*
- * 0.5us/byte 160 16 1.6 0.16 0.016
- * us/byte 80 8 0.8 0.08 0.008
- * 1.27us/byte 63 6.3 0.63 0.063 0.0063
+ * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125
*/
#define SM_SHIFT 20
#define ISM_SHIFT 18
* [NET_SCHED 02/10]: Add hrtimer based qdisc watchdog
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 01/10]: Use ktime as clocksource Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 03/10]: sch_hfsc: use hrtimer based watchdog Patrick McHardy
` (8 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: Add hrtimer based qdisc watchdog
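The changelog is only a one-liner; for orientation, this is how the later
patches in the series use the new API (a sketch, not a complete qdisc:
foo_sched_data, q, sch and next_send are placeholder names):

    /* hypothetical qdisc private data; only the watchdog member matters here */
    struct foo_sched_data {
            struct qdisc_watchdog   watchdog;
    };

    /* ->init(): bind the watchdog to the qdisc */
    qdisc_watchdog_init(&q->watchdog, sch);

    /* ->dequeue(): nothing may be sent before next_send (absolute psched time);
     * this sets TCQ_F_THROTTLED and arms the hrtimer */
    qdisc_watchdog_schedule(&q->watchdog, next_send);

    /* ->reset()/->destroy(): cancel the timer and clear TCQ_F_THROTTLED */
    qdisc_watchdog_cancel(&q->watchdog);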
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit 4d0baa5c06cb04a53d8c68ecc10f38a295f08d14
tree 08da6754c9cc015a3118c452649d9fb6425dd903
parent e400fa6d9e7c13d675b96958a967e747148e9b70
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:26 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:26 +0100
include/net/pkt_sched.h | 10 ++++++++++
net/sched/sch_api.c | 36 ++++++++++++++++++++++++++++++++++++
2 files changed, 46 insertions(+), 0 deletions(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 1c12afd..b090d55 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -64,6 +64,16 @@ #define PSCHED_SET_PASTPERFECT(t) ((t) =
#define PSCHED_IS_PASTPERFECT(t) ((t) == 0)
#define PSCHED_AUDIT_TDIFF(t)
+struct qdisc_watchdog {
+ struct hrtimer timer;
+ struct Qdisc *qdisc;
+};
+
+extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+extern void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
+ psched_time_t expires);
+extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
+
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d71bf79..6bc395c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -34,6 +34,7 @@ #include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/bitops.h>
+#include <linux/hrtimer.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -291,6 +292,41 @@ void qdisc_put_rtab(struct qdisc_rate_ta
}
}
+static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
+{
+ struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
+ timer);
+
+ wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ netif_schedule(wd->qdisc->dev);
+ return HRTIMER_NORESTART;
+}
+
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
+{
+ hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ wd->timer.function = qdisc_watchdog;
+ wd->qdisc = qdisc;
+}
+EXPORT_SYMBOL(qdisc_watchdog_init);
+
+void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
+{
+ ktime_t time;
+
+ wd->qdisc->flags |= TCQ_F_THROTTLED;
+ time = ktime_set(0, 0);
+ time = ktime_add_ns(time, PSCHED_US2NS(expires));
+ hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
+}
+EXPORT_SYMBOL(qdisc_watchdog_schedule);
+
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
+{
+ hrtimer_cancel(&wd->timer);
+ wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+}
+EXPORT_SYMBOL(qdisc_watchdog_cancel);
/* Allocate an unique handle from space managed by kernel */
* [NET_SCHED 03/10]: sch_hfsc: use hrtimer based watchdog
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 01/10]: Use ktime as clocksource Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 02/10]: Add hrtimer based qdisc watchdog Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 04/10]: sch_tbf: " Patrick McHardy
` (7 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_hfsc: use hrtimer based watchdog
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit 76728bda8872d54abd39cdc47d703384f735f7ea
tree 3563cceb7028b40160616835ef29bcef2523a0fb
parent 4d0baa5c06cb04a53d8c68ecc10f38a295f08d14
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:28 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:28 +0100
net/sched/sch_hfsc.c | 32 +++++++-------------------------
1 files changed, 7 insertions(+), 25 deletions(-)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 09cf6e4..49cae7d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -59,7 +59,6 @@ #include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include <linux/timer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
@@ -192,7 +191,7 @@ struct hfsc_sched
struct list_head droplist; /* active leaf class list (for
dropping) */
struct sk_buff_head requeue; /* requeued packet */
- struct timer_list wd_timer; /* watchdog timer */
+ struct qdisc_watchdog watchdog; /* watchdog timer */
};
#define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
@@ -1432,21 +1431,11 @@ hfsc_walk(struct Qdisc *sch, struct qdis
}
static void
-hfsc_watchdog(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc *)arg;
-
- sch->flags &= ~TCQ_F_THROTTLED;
- netif_schedule(sch->dev);
-}
-
-static void
-hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
+hfsc_schedule_watchdog(struct Qdisc *sch)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
u64 next_time = 0;
- long delay;
if ((cl = eltree_get_minel(q)) != NULL)
next_time = cl->cl_e;
@@ -1455,11 +1444,7 @@ hfsc_schedule_watchdog(struct Qdisc *sch
next_time = q->root.cl_cfmin;
}
WARN_ON(next_time == 0);
- delay = next_time - cur_time;
- delay = PSCHED_US2JIFFIE(delay);
-
- sch->flags |= TCQ_F_THROTTLED;
- mod_timer(&q->wd_timer, jiffies + delay);
+ qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
@@ -1496,9 +1481,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struc
list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);
- init_timer(&q->wd_timer);
- q->wd_timer.function = hfsc_watchdog;
- q->wd_timer.data = (unsigned long)sch;
+ qdisc_watchdog_init(&q->watchdog, sch);
return 0;
}
@@ -1568,8 +1551,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
__skb_queue_purge(&q->requeue);
q->eligible = RB_ROOT;
INIT_LIST_HEAD(&q->droplist);
- del_timer(&q->wd_timer);
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_watchdog_cancel(&q->watchdog);
sch->q.qlen = 0;
}
@@ -1585,7 +1567,7 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
hfsc_destroy_class(sch, cl);
}
__skb_queue_purge(&q->requeue);
- del_timer(&q->wd_timer);
+ qdisc_watchdog_cancel(&q->watchdog);
}
static int
@@ -1671,7 +1653,7 @@ hfsc_dequeue(struct Qdisc *sch)
cl = vttree_get_minvt(&q->root, cur_time);
if (cl == NULL) {
sch->qstats.overlimits++;
- hfsc_schedule_watchdog(sch, cur_time);
+ hfsc_schedule_watchdog(sch);
return NULL;
}
}
* [NET_SCHED 04/10]: sch_tbf: use hrtimer based watchdog
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (2 preceding siblings ...)
2007-03-16 5:30 ` [NET_SCHED 03/10]: sch_hfsc: use hrtimer based watchdog Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 05/10]: sch_netem: " Patrick McHardy
` (6 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_tbf: use hrtimer based watchdog
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit 7acf6ee9c08fdbf5cb24b15d95432eef07506a38
tree b47d381f5c9560099cb954a186a0ee3fc7a34d60
parent 76728bda8872d54abd39cdc47d703384f735f7ea
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:28 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:28 +0100
net/sched/sch_tbf.c | 31 +++++++------------------------
1 files changed, 7 insertions(+), 24 deletions(-)
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 85da8da..f14692f 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -127,8 +127,8 @@ struct tbf_sched_data
long tokens; /* Current number of B tokens */
long ptokens; /* Current number of P tokens */
psched_time_t t_c; /* Time check-point */
- struct timer_list wd_timer; /* Watchdog timer */
struct Qdisc *qdisc; /* Inner qdisc, default - bfifo queue */
+ struct qdisc_watchdog watchdog; /* Watchdog timer */
};
#define L2T(q,L) ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log])
@@ -185,14 +185,6 @@ static unsigned int tbf_drop(struct Qdis
return len;
}
-static void tbf_watchdog(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc*)arg;
-
- sch->flags &= ~TCQ_F_THROTTLED;
- netif_schedule(sch->dev);
-}
-
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -202,7 +194,7 @@ static struct sk_buff *tbf_dequeue(struc
if (skb) {
psched_time_t now;
- long toks, delay;
+ long toks;
long ptoks = 0;
unsigned int len = skb->len;
@@ -230,12 +222,8 @@ static struct sk_buff *tbf_dequeue(struc
return skb;
}
- delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
-
- if (delay == 0)
- delay = 1;
-
- mod_timer(&q->wd_timer, jiffies+delay);
+ qdisc_watchdog_schedule(&q->watchdog,
+ now + max_t(long, -toks, -ptoks));
/* Maybe we have a shorter packet in the queue,
which can be sent now. It sounds cool,
@@ -254,7 +242,6 @@ static struct sk_buff *tbf_dequeue(struc
sch->qstats.drops++;
}
- sch->flags |= TCQ_F_THROTTLED;
sch->qstats.overlimits++;
}
return NULL;
@@ -269,8 +256,7 @@ static void tbf_reset(struct Qdisc* sch)
PSCHED_GET_TIME(q->t_c);
q->tokens = q->buffer;
q->ptokens = q->mtu;
- sch->flags &= ~TCQ_F_THROTTLED;
- del_timer(&q->wd_timer);
+ qdisc_watchdog_cancel(&q->watchdog);
}
static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
@@ -378,10 +364,7 @@ static int tbf_init(struct Qdisc* sch, s
return -EINVAL;
PSCHED_GET_TIME(q->t_c);
- init_timer(&q->wd_timer);
- q->wd_timer.function = tbf_watchdog;
- q->wd_timer.data = (unsigned long)sch;
-
+ qdisc_watchdog_init(&q->watchdog, sch);
q->qdisc = &noop_qdisc;
return tbf_change(sch, opt);
@@ -391,7 +374,7 @@ static void tbf_destroy(struct Qdisc *sc
{
struct tbf_sched_data *q = qdisc_priv(sch);
- del_timer(&q->wd_timer);
+ qdisc_watchdog_cancel(&q->watchdog);
if (q->P_tab)
qdisc_put_rtab(q->P_tab);
* [NET_SCHED 05/10]: sch_netem: use hrtimer based watchdog
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (3 preceding siblings ...)
2007-03-16 5:30 ` [NET_SCHED 04/10]: sch_tbf: " Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 06/10]: sch_cbq: " Patrick McHardy
` (5 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_netem: use hrtimer based watchdog
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit b0b8ce02c1564f86f09a0ccb728327d02cf202f0
tree 54ac4c91460fa9b34542dfc5dcbd9a796b037950
parent 7acf6ee9c08fdbf5cb24b15d95432eef07506a38
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:28 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:28 +0100
net/sched/sch_netem.c | 25 +++++--------------------
1 files changed, 5 insertions(+), 20 deletions(-)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1ccbfb5..915f82a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -54,7 +54,7 @@ #define VERSION "1.2"
struct netem_sched_data {
struct Qdisc *qdisc;
- struct timer_list timer;
+ struct qdisc_watchdog watchdog;
u32 latency;
u32 loss;
@@ -284,7 +284,7 @@ static struct sk_buff *netem_dequeue(str
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
} else {
- psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);
+ qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
qdisc_tree_decrease_qlen(q->qdisc, 1);
@@ -292,32 +292,19 @@ static struct sk_buff *netem_dequeue(str
printk(KERN_ERR "netem: queue discpline %s could not requeue\n",
q->qdisc->ops->id);
}
-
- mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
- sch->flags |= TCQ_F_THROTTLED;
}
}
return NULL;
}
-static void netem_watchdog(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc *)arg;
-
- pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
- sch->flags &= ~TCQ_F_THROTTLED;
- netif_schedule(sch->dev);
-}
-
static void netem_reset(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
- sch->flags &= ~TCQ_F_THROTTLED;
- del_timer_sync(&q->timer);
+ qdisc_watchdog_cancel(&q->watchdog);
}
/* Pass size change message down to embedded FIFO */
@@ -567,9 +554,7 @@ static int netem_init(struct Qdisc *sch,
if (!opt)
return -EINVAL;
- init_timer(&q->timer);
- q->timer.function = netem_watchdog;
- q->timer.data = (unsigned long) sch;
+ qdisc_watchdog_init(&q->watchdog, sch);
q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
TC_H_MAKE(sch->handle, 1));
@@ -590,7 +575,7 @@ static void netem_destroy(struct Qdisc *
{
struct netem_sched_data *q = qdisc_priv(sch);
- del_timer_sync(&q->timer);
+ qdisc_watchdog_cancel(&q->watchdog);
qdisc_destroy(q->qdisc);
kfree(q->delay_dist);
}
* [NET_SCHED 06/10]: sch_cbq: use hrtimer based watchdog
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (4 preceding siblings ...)
2007-03-16 5:30 ` [NET_SCHED 05/10]: sch_netem: " Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 07/10]: sch_cbq: fix cbq_undelay_prio for non-active priorites Patrick McHardy
` (4 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_cbq: use hrtimer based watchdog
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit dc1a944b36eeddc06e7288a2eec9344252d4ccb7
tree c0bd33b0ada4fbda631c246f7fe5881cf830e932
parent b0b8ce02c1564f86f09a0ccb728327d02cf202f0
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:29 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:29 +0100
net/sched/sch_cbq.c | 28 +++++++---------------------
1 files changed, 7 insertions(+), 21 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 76c92e7..d29d121 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -181,11 +181,11 @@ #endif
unsigned pmask;
struct timer_list delay_timer;
- struct timer_list wd_timer; /* Watchdog timer,
+ struct qdisc_watchdog watchdog; /* Watchdog timer,
started when CBQ has
backlog, but cannot
transmit just now */
- long wd_expires;
+ psched_tdiff_t wd_expires;
int toplevel;
u32 hgenerator;
};
@@ -604,14 +604,6 @@ static void cbq_ovl_drop(struct cbq_clas
cbq_ovl_classic(cl);
}
-static void cbq_watchdog(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc*)arg;
-
- sch->flags &= ~TCQ_F_THROTTLED;
- netif_schedule(sch->dev);
-}
-
static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
{
struct cbq_class *cl;
@@ -1063,13 +1055,9 @@ cbq_dequeue(struct Qdisc *sch)
if (sch->q.qlen) {
sch->qstats.overlimits++;
- if (q->wd_expires) {
- long delay = PSCHED_US2JIFFIE(q->wd_expires);
- if (delay <= 0)
- delay = 1;
- mod_timer(&q->wd_timer, jiffies + delay);
- sch->flags |= TCQ_F_THROTTLED;
- }
+ if (q->wd_expires)
+ qdisc_watchdog_schedule(&q->watchdog,
+ q->now + q->wd_expires);
}
return NULL;
}
@@ -1276,7 +1264,7 @@ cbq_reset(struct Qdisc* sch)
q->pmask = 0;
q->tx_class = NULL;
q->tx_borrowed = NULL;
- del_timer(&q->wd_timer);
+ qdisc_watchdog_cancel(&q->watchdog);
del_timer(&q->delay_timer);
q->toplevel = TC_CBQ_MAXLEVEL;
PSCHED_GET_TIME(q->now);
@@ -1446,9 +1434,7 @@ static int cbq_init(struct Qdisc *sch, s
q->link.minidle = -0x7FFFFFFF;
q->link.stats_lock = &sch->dev->queue_lock;
- init_timer(&q->wd_timer);
- q->wd_timer.data = (unsigned long)sch;
- q->wd_timer.function = cbq_watchdog;
+ qdisc_watchdog_init(&q->watchdog, sch);
init_timer(&q->delay_timer);
q->delay_timer.data = (unsigned long)sch;
q->delay_timer.function = cbq_undelay;
* [NET_SCHED 07/10]: sch_cbq: fix cbq_undelay_prio for non-active priorites
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (5 preceding siblings ...)
2007-03-16 5:30 ` [NET_SCHED 06/10]: sch_cbq: " Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:30 ` [NET_SCHED 08/10]: sch_cbq: use hrtimer for delay_timer Patrick McHardy
` (3 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_cbq: fix cbq_undelay_prio for non-active priorites
cbq_undelay_prio is supposed to return a time delta, but returns the
current time for non-active priorities, causing cbq_undelay to mark
the priority as active and schedule a timer for twice the current
time.
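In other words (a worked illustration with made-up jiffies values; the
fragments shown follow the pre-patch cbq_undelay_prio/cbq_undelay logic):

    /* cbq_undelay_prio(), inactive priority, before this patch: */
    if (cl_prev == NULL)
            return now;             /* e.g. jiffies == 100000: a timestamp, not a delta */

    /* cbq_undelay() then treats the return value as a delta: */
    if (tmp > 0)                    /* 100000 > 0: priority wrongly re-marked active */
            q->pmask |= 1<<prio;
    q->delay_timer.expires = jiffies + delay;   /* 100000 + 100000: twice the current time */

    /* returning 0 lets the inactive priority drop out of pmask as intended */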
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit 40c78645115590d29549f6a1603847a620206d29
tree 256ba8884abb84aedaec857800a93a0619f9195e
parent dc1a944b36eeddc06e7288a2eec9344252d4ccb7
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:29 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:06:29 +0100
net/sched/sch_cbq.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d29d121..32f6a30 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -612,7 +612,7 @@ static unsigned long cbq_undelay_prio(st
unsigned long sched = now;
if (cl_prev == NULL)
- return now;
+ return 0;
do {
cl = cl_prev->next_alive;
* [NET_SCHED 08/10]: sch_cbq: use hrtimer for delay_timer
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (6 preceding siblings ...)
2007-03-16 5:30 ` [NET_SCHED 07/10]: sch_cbq: fix cbq_undelay_prio for non-active priorites Patrick McHardy
@ 2007-03-16 5:30 ` Patrick McHardy
2007-03-16 5:31 ` [NET_SCHED 09/10]: sch_htb: use hrtimer based watchdog Patrick McHardy
` (2 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:30 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_cbq: use hrtimer for delay_timer
Switch delay_timer to hrtimer.
The class penalty parameter is changed to use psched ticks as units.
Since iproute never supported using this and the only existing user
(libnl) incorrectly assumes psched ticks as units anyway, this
shouldn't break anything.
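Concretely, the change in units (the two assignments are taken from the diff
below; the numeric example assumes a TCA_CBQ_OVL_STRATEGY penalty value of 1000):

    old:    cl->penalty = (ovl->penalty * HZ) / 1000;   /* attribute interpreted as ms, stored in jiffies */
    new:    cl->penalty = ovl->penalty;                 /* attribute interpreted as psched ticks          */

    /* e.g. ovl->penalty == 1000: previously 1000ms, now 1000 ticks * 1.024us ~= 1.02ms,
     * which is what libnl assumed all along */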
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit 2d548d3ea10cb1b49db8913fbab32811d9e7c3d0
tree 11561d6ada5553eb4fe945bd02cb31779be3c80d
parent 40c78645115590d29549f6a1603847a620206d29
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:16:05 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:16:05 +0100
net/sched/sch_cbq.c | 70 ++++++++++++++++++++++++++++++---------------------
1 files changed, 41 insertions(+), 29 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 32f6a30..ed7d540 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -112,7 +112,7 @@ #endif
/* Overlimit strategy parameters */
void (*overlimit)(struct cbq_class *cl);
- long penalty;
+ psched_tdiff_t penalty;
/* General scheduler (WRR) parameters */
long allot;
@@ -143,7 +143,7 @@ #endif
psched_time_t undertime;
long avgidle;
long deficit; /* Saved deficit for WRR */
- unsigned long penalized;
+ psched_time_t penalized;
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
@@ -180,7 +180,7 @@ #endif
psched_time_t now_rt; /* Cached real time */
unsigned pmask;
- struct timer_list delay_timer;
+ struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
started when CBQ has
backlog, but cannot
@@ -549,7 +549,8 @@ static void cbq_ovl_delay(struct cbq_cla
psched_tdiff_t delay = PSCHED_TDIFF(cl->undertime, q->now);
if (!cl->delayed) {
- unsigned long sched = jiffies;
+ psched_time_t sched = q->now;
+ ktime_t expires;
delay += cl->offtime;
if (cl->avgidle < 0)
@@ -559,14 +560,18 @@ static void cbq_ovl_delay(struct cbq_cla
PSCHED_TADD2(q->now, delay, cl->undertime);
if (delay > 0) {
- sched += PSCHED_US2JIFFIE(delay) + cl->penalty;
+ sched += delay + cl->penalty;
cl->penalized = sched;
cl->cpriority = TC_CBQ_MAXPRIO;
q->pmask |= (1<<TC_CBQ_MAXPRIO);
- if (del_timer(&q->delay_timer) &&
- (long)(q->delay_timer.expires - sched) > 0)
- q->delay_timer.expires = sched;
- add_timer(&q->delay_timer);
+
+ expires = ktime_set(0, 0);
+ expires = ktime_add_ns(expires, PSCHED_US2NS(sched));
+ if (hrtimer_try_to_cancel(&q->delay_timer) &&
+ ktime_to_ns(ktime_sub(q->delay_timer.expires,
+ expires)) > 0)
+ q->delay_timer.expires = expires;
+ hrtimer_restart(&q->delay_timer);
cl->delayed = 1;
cl->xstats.overactions++;
return;
@@ -583,7 +588,7 @@ static void cbq_ovl_lowprio(struct cbq_c
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
- cl->penalized = jiffies + cl->penalty;
+ cl->penalized = q->now + cl->penalty;
if (cl->cpriority != cl->priority2) {
cl->cpriority = cl->priority2;
@@ -604,19 +609,19 @@ static void cbq_ovl_drop(struct cbq_clas
cbq_ovl_classic(cl);
}
-static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
+static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
+ psched_time_t now)
{
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
- unsigned long now = jiffies;
- unsigned long sched = now;
+ psched_time_t sched = now;
if (cl_prev == NULL)
return 0;
do {
cl = cl_prev->next_alive;
- if ((long)(now - cl->penalized) > 0) {
+ if (now - cl->penalized > 0) {
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
cl->cpriority = cl->priority;
@@ -632,30 +637,34 @@ static unsigned long cbq_undelay_prio(st
}
cl = cl_prev->next_alive;
- } else if ((long)(sched - cl->penalized) > 0)
+ } else if (sched - cl->penalized > 0)
sched = cl->penalized;
} while ((cl_prev = cl) != q->active[prio]);
- return (long)(sched - now);
+ return sched - now;
}
-static void cbq_undelay(unsigned long arg)
+static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
- struct Qdisc *sch = (struct Qdisc*)arg;
- struct cbq_sched_data *q = qdisc_priv(sch);
- long delay = 0;
+ struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
+ delay_timer);
+ struct Qdisc *sch = q->watchdog.qdisc;
+ psched_time_t now;
+ psched_tdiff_t delay = 0;
unsigned pmask;
+ PSCHED_GET_TIME(now);
+
pmask = q->pmask;
q->pmask = 0;
while (pmask) {
int prio = ffz(~pmask);
- long tmp;
+ psched_tdiff_t tmp;
pmask &= ~(1<<prio);
- tmp = cbq_undelay_prio(q, prio);
+ tmp = cbq_undelay_prio(q, prio, now);
if (tmp > 0) {
q->pmask |= 1<<prio;
if (tmp < delay || delay == 0)
@@ -664,12 +673,16 @@ static void cbq_undelay(unsigned long ar
}
if (delay) {
- q->delay_timer.expires = jiffies + delay;
- add_timer(&q->delay_timer);
+ ktime_t time;
+
+ time = ktime_set(0, 0);
+ time = ktime_add_ns(time, PSCHED_US2NS(now + delay));
+ hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
}
sch->flags &= ~TCQ_F_THROTTLED;
netif_schedule(sch->dev);
+ return HRTIMER_NORESTART;
}
@@ -1265,7 +1278,7 @@ cbq_reset(struct Qdisc* sch)
q->tx_class = NULL;
q->tx_borrowed = NULL;
qdisc_watchdog_cancel(&q->watchdog);
- del_timer(&q->delay_timer);
+ hrtimer_cancel(&q->delay_timer);
q->toplevel = TC_CBQ_MAXLEVEL;
PSCHED_GET_TIME(q->now);
q->now_rt = q->now;
@@ -1367,7 +1380,7 @@ static int cbq_set_overlimit(struct cbq_
default:
return -EINVAL;
}
- cl->penalty = (ovl->penalty*HZ)/1000;
+ cl->penalty = ovl->penalty;
return 0;
}
@@ -1435,8 +1448,7 @@ static int cbq_init(struct Qdisc *sch, s
q->link.stats_lock = &sch->dev->queue_lock;
qdisc_watchdog_init(&q->watchdog, sch);
- init_timer(&q->delay_timer);
- q->delay_timer.data = (unsigned long)sch;
+ hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
q->delay_timer.function = cbq_undelay;
q->toplevel = TC_CBQ_MAXLEVEL;
PSCHED_GET_TIME(q->now);
@@ -1514,7 +1526,7 @@ static __inline__ int cbq_dump_ovl(struc
opt.strategy = cl->ovl_strategy;
opt.priority2 = cl->priority2+1;
opt.pad = 0;
- opt.penalty = (cl->penalty*1000)/HZ;
+ opt.penalty = cl->penalty;
RTA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
return skb->len;
* [NET_SCHED 09/10]: sch_htb: use hrtimer based watchdog
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (7 preceding siblings ...)
2007-03-16 5:30 ` [NET_SCHED 08/10]: sch_cbq: use hrtimer for delay_timer Patrick McHardy
@ 2007-03-16 5:31 ` Patrick McHardy
2007-03-16 5:31 ` [NET_SCHED 10/10]: kill jiffie conversion macros Patrick McHardy
2007-03-16 9:35 ` [NET_SCHED 00/10]: ktime clocksource + hrtimer David Miller
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:31 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: sch_htb: use hrtimer based watchdog
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit a5eaa252c5da48fef25a308ebc99b4020dad7f64
tree a609ff508f5ec9a93df1ace8f814aa0a71a5ff97
parent 2d548d3ea10cb1b49db8913fbab32811d9e7c3d0
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:17:30 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:17:30 +0100
net/sched/sch_htb.c | 91 +++++++++++++++++----------------------------------
1 files changed, 31 insertions(+), 60 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 97cbb9a..b00cef7 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -128,7 +128,7 @@ #endif
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
- unsigned long pq_key; /* the same type as jiffies global */
+ psched_time_t pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
@@ -179,10 +179,7 @@ struct htb_sched {
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
- unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
-
- /* cached value of jiffies in dequeue */
- unsigned long jiffies;
+ psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
/* whether we hit non-work conserving class during this dequeue; we use */
int nwc_hit; /* this to disable mindelay complaint in dequeue */
@@ -195,7 +192,7 @@ struct htb_sched {
int rate2quantum; /* quant = rate / rate2quantum */
psched_time_t now; /* cached dequeue time */
- struct timer_list timer; /* send delay timer */
+ struct qdisc_watchdog watchdog;
#ifdef HTB_RATECM
struct timer_list rttim; /* rate computer timer */
int recmp_bucket; /* which hash bucket to recompute next */
@@ -342,19 +339,19 @@ static void htb_add_to_wait_tree(struct
{
struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
- cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
- if (cl->pq_key == q->jiffies)
+ cl->pq_key = q->now + delay;
+ if (cl->pq_key == q->now)
cl->pq_key++;
/* update the nearest event cache */
- if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
+ if (q->near_ev_cache[cl->level] > cl->pq_key)
q->near_ev_cache[cl->level] = cl->pq_key;
while (*p) {
struct htb_class *c;
parent = *p;
c = rb_entry(parent, struct htb_class, pq_node);
- if (time_after_eq(cl->pq_key, c->pq_key))
+ if (cl->pq_key >= c->pq_key)
p = &parent->rb_right;
else
p = &parent->rb_left;
@@ -679,14 +676,6 @@ static int htb_requeue(struct sk_buff *s
return NET_XMIT_SUCCESS;
}
-static void htb_timer(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc *)arg;
- sch->flags &= ~TCQ_F_THROTTLED;
- wmb();
- netif_schedule(sch->dev);
-}
-
#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
@@ -778,11 +767,11 @@ #endif
/**
* htb_do_events - make mode changes to classes at the level
*
- * Scans event queue for pending events and applies them. Returns jiffies to
+ * Scans event queue for pending events and applies them. Returns time of
* next pending event (0 for no event in pq).
- * Note: Aplied are events whose have cl->pq_key <= jiffies.
+ * Note: Applied are events whose have cl->pq_key <= q->now.
*/
-static long htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
int i;
@@ -795,9 +784,9 @@ static long htb_do_events(struct htb_sch
return 0;
cl = rb_entry(p, struct htb_class, pq_node);
- if (time_after(cl->pq_key, q->jiffies)) {
- return cl->pq_key - q->jiffies;
- }
+ if (cl->pq_key > q->now)
+ return cl->pq_key;
+
htb_safe_rb_erase(p, q->wait_pq + level);
diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
htb_change_class_mode(q, cl, &diff);
@@ -806,7 +795,7 @@ static long htb_do_events(struct htb_sch
}
if (net_ratelimit())
printk(KERN_WARNING "htb: too many events !\n");
- return HZ / 10;
+ return q->now + PSCHED_TICKS_PER_SEC / 10;
}
/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
@@ -958,30 +947,12 @@ next:
return skb;
}
-static void htb_delay_by(struct Qdisc *sch, long delay)
-{
- struct htb_sched *q = qdisc_priv(sch);
- if (delay <= 0)
- delay = 1;
- if (unlikely(delay > 5 * HZ)) {
- if (net_ratelimit())
- printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
- delay = 5 * HZ;
- }
- /* why don't use jiffies here ? because expires can be in past */
- mod_timer(&q->timer, q->jiffies + delay);
- sch->flags |= TCQ_F_THROTTLED;
- sch->qstats.overlimits++;
-}
-
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb = NULL;
struct htb_sched *q = qdisc_priv(sch);
int level;
- long min_delay;
-
- q->jiffies = jiffies;
+ psched_time_t next_event;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
skb = __skb_dequeue(&q->direct_queue);
@@ -995,21 +966,23 @@ static struct sk_buff *htb_dequeue(struc
goto fin;
PSCHED_GET_TIME(q->now);
- min_delay = LONG_MAX;
+ next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
q->nwc_hit = 0;
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
- long delay;
- if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
- delay = htb_do_events(q, level);
- q->near_ev_cache[level] =
- q->jiffies + (delay ? delay : HZ);
+ psched_time_t event;
+
+ if (q->now >= q->near_ev_cache[level]) {
+ event = htb_do_events(q, level);
+ q->near_ev_cache[level] = event ? event :
+ PSCHED_TICKS_PER_SEC;
} else
- delay = q->near_ev_cache[level] - q->jiffies;
+ event = q->near_ev_cache[level];
+
+ if (event && next_event > event)
+ next_event = event;
- if (delay && min_delay > delay)
- min_delay = delay;
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
@@ -1022,7 +995,8 @@ static struct sk_buff *htb_dequeue(struc
}
}
}
- htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
+ sch->qstats.overlimits++;
+ qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
return skb;
}
@@ -1075,8 +1049,7 @@ static void htb_reset(struct Qdisc *sch)
}
}
- sch->flags &= ~TCQ_F_THROTTLED;
- del_timer(&q->timer);
+ qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
memset(q->row, 0, sizeof(q->row));
@@ -1113,14 +1086,12 @@ static int htb_init(struct Qdisc *sch, s
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
- init_timer(&q->timer);
+ qdisc_watchdog_init(&q->watchdog, sch);
skb_queue_head_init(&q->direct_queue);
q->direct_qlen = sch->dev->tx_queue_len;
if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
q->direct_qlen = 2;
- q->timer.function = htb_timer;
- q->timer.data = (unsigned long)sch;
#ifdef HTB_RATECM
init_timer(&q->rttim);
@@ -1341,7 +1312,7 @@ static void htb_destroy(struct Qdisc *sc
{
struct htb_sched *q = qdisc_priv(sch);
- del_timer_sync(&q->timer);
+ qdisc_watchdog_cancel(&q->watchdog);
#ifdef HTB_RATECM
del_timer_sync(&q->rttim);
#endif
* [NET_SCHED 10/10]: kill jiffie conversion macros
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (8 preceding siblings ...)
2007-03-16 5:31 ` [NET_SCHED 09/10]: sch_htb: use hrtimer based watchdog Patrick McHardy
@ 2007-03-16 5:31 ` Patrick McHardy
2007-03-16 9:35 ` [NET_SCHED 00/10]: ktime clocksource + hrtimer David Miller
10 siblings, 0 replies; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 5:31 UTC (permalink / raw)
To: davem; +Cc: devik, netdev, Patrick McHardy, shemminger
[NET_SCHED]: kill jiffie conversion macros
Now that all packet schedulers have been converted to hrtimers, most users
of PSCHED_JIFFIE2US and PSCHED_US2JIFFIE are gone. The remaining users use
them to convert external time units to packet scheduler clock ticks, so use
PSCHED_TICKS_PER_SEC instead.
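As an example of such a conversion, hfsc's d2dx() below turns an externally
supplied delay in microseconds into psched ticks (the code lines are from the
diff; the numeric example is added for illustration):

    /* d is in usec, the result dx is in psched ticks of 1.024us each */
    dx = (u64)d * PSCHED_TICKS_PER_SEC;
    dx += USEC_PER_SEC - 1;                 /* round up */
    do_div(dx, USEC_PER_SEC);

    /* e.g. d = 10000us: dx = ceil(10000 * 976562 / 1000000) = 9766 ticks ~= 10ms */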
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit 32b945035419c2d458cd0bf7072acb335e5c4044
tree 6837d1c9419e1ada826701fbb1f9e93ff8ff5a33
parent a5eaa252c5da48fef25a308ebc99b4020dad7f64
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:17:44 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 06:17:44 +0100
include/net/pkt_sched.h | 3 ---
net/sched/sch_hfsc.c | 12 ++++++------
net/sched/sch_htb.c | 2 +-
3 files changed, 7 insertions(+), 10 deletions(-)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b090d55..6555e57 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -51,9 +51,6 @@ #define PSCHED_TICKS_PER_SEC PSCHED_NS2
#define PSCHED_GET_TIME(stamp) \
((stamp) = PSCHED_NS2US(ktime_to_ns(ktime_get())))
-#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(PSCHED_US2NS((usecs)) / NSEC_PER_USEC)
-#define PSCHED_JIFFIE2US(delay) PSCHED_NS2US(jiffies_to_usecs((delay)) * NSEC_PER_USEC)
-
#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
#define PSCHED_TDIFF_SAFE(tv1, tv2, bound) \
min_t(long long, (tv1) - (tv2), bound)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 49cae7d..522018b 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -434,8 +434,8 @@ m2sm(u32 m)
u64 sm;
sm = ((u64)m << SM_SHIFT);
- sm += PSCHED_JIFFIE2US(HZ) - 1;
- do_div(sm, PSCHED_JIFFIE2US(HZ));
+ sm += PSCHED_TICKS_PER_SEC - 1;
+ do_div(sm, PSCHED_TICKS_PER_SEC);
return sm;
}
@@ -448,7 +448,7 @@ m2ism(u32 m)
if (m == 0)
ism = HT_INFINITY;
else {
- ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT);
+ ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
ism += m - 1;
do_div(ism, m);
}
@@ -461,7 +461,7 @@ d2dx(u32 d)
{
u64 dx;
- dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
+ dx = ((u64)d * PSCHED_TICKS_PER_SEC);
dx += USEC_PER_SEC - 1;
do_div(dx, USEC_PER_SEC);
return dx;
@@ -473,7 +473,7 @@ sm2m(u64 sm)
{
u64 m;
- m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT;
+ m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
return (u32)m;
}
@@ -484,7 +484,7 @@ dx2d(u64 dx)
u64 d;
d = dx * USEC_PER_SEC;
- do_div(d, PSCHED_JIFFIE2US(HZ));
+ do_div(d, PSCHED_TICKS_PER_SEC);
return (u32)d;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index b00cef7..1721043 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1469,7 +1469,7 @@ static int htb_change_class(struct Qdisc
/* set class to be in HTB_CAN_SEND state */
cl->tokens = hopt->buffer;
cl->ctokens = hopt->cbuffer;
- cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60); /* 1min */
+ cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
PSCHED_GET_TIME(cl->t_c);
cl->cmode = HTB_CAN_SEND;
* Re: [NET_SCHED 00/10]: ktime clocksource + hrtimer
2007-03-16 5:30 [NET_SCHED 00/10]: ktime clocksource + hrtimer Patrick McHardy
` (9 preceding siblings ...)
2007-03-16 5:31 ` [NET_SCHED 10/10]: kill jiffie conversion macros Patrick McHardy
@ 2007-03-16 9:35 ` David Miller
2007-03-16 9:42 ` Patrick McHardy
10 siblings, 1 reply; 16+ messages in thread
From: David Miller @ 2007-03-16 9:35 UTC (permalink / raw)
To: kaber; +Cc: devik, netdev, shemminger
From: Patrick McHardy <kaber@trash.net>
Date: Fri, 16 Mar 2007 06:30:48 +0100 (MET)
> These patches convert the packet schedulers to use ktime as only clock
> source and kill off the manual clock source selection. Additionally all
> packet schedulers are converted to use hrtimer-based watchdogs, greatly
> increasing scheduling precision.
>
> I've tested HFSC, HTB, TBF and netem. CBQ is untested since I never
> managed to get it running properly even without these patches.
>
> I have a number of follow-up cleanup patches that get rid of most of
> the remaining PSCHED_* macros, I'll send them once these patches have
> been merged.
>
> Please apply, thanks.
All 11 patches applied and pushed out, very very nice work
Patrick!
It would be nice to figure out why CBQ isn't working for
you, I know at least some folks have used it successfully
at some point.
* Re: [NET_SCHED 00/10]: ktime clocksource + hrtimer
2007-03-16 9:35 ` [NET_SCHED 00/10]: ktime clocksource + hrtimer David Miller
@ 2007-03-16 9:42 ` Patrick McHardy
2007-03-16 12:41 ` Thomas Graf
0 siblings, 1 reply; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 9:42 UTC (permalink / raw)
To: David Miller; +Cc: devik, netdev, shemminger
David Miller wrote:
> All 11 patches applied and pushed out, very very nice work
> Patrick!
Thanks Dave :)
> It would be nice to figure out why CBQ isn't working for
> you, I know at least some folks have used it successfully
> at some point.
Probably my own incompetence, I'll see if I can find some
scripts that are known to work.
* Re: [NET_SCHED 00/10]: ktime clocksource + hrtimer
2007-03-16 9:42 ` Patrick McHardy
@ 2007-03-16 12:41 ` Thomas Graf
2007-03-16 12:45 ` Patrick McHardy
0 siblings, 1 reply; 16+ messages in thread
From: Thomas Graf @ 2007-03-16 12:41 UTC (permalink / raw)
To: Patrick McHardy; +Cc: David Miller, devik, netdev, shemminger
* Patrick McHardy <kaber@trash.net> 2007-03-16 10:42
> David Miller wrote:
> > It would be nice to figure out why CBQ isn't working for
> > you, I know at least some folks have used it successfully
> > at some point.
>
> Probably my own incompetence, I'll see if I can find some
> scripts that are known to work.
I'll complain if it stops working, I'm still using CBQ to
keep rsync and other bulk flows from interfering with ssh.
HTB just doesn't have the same homy touch :-)
* Re: [NET_SCHED 00/10]: ktime clocksource + hrtimer
2007-03-16 12:41 ` Thomas Graf
@ 2007-03-16 12:45 ` Patrick McHardy
2007-03-16 19:31 ` David Miller
0 siblings, 1 reply; 16+ messages in thread
From: Patrick McHardy @ 2007-03-16 12:45 UTC (permalink / raw)
To: Thomas Graf; +Cc: David Miller, devik, netdev, shemminger
[-- Attachment #1: Type: text/plain, Size: 653 bytes --]
Thomas Graf wrote:
> * Patrick McHardy <kaber@trash.net> 2007-03-16 10:42
>
>>David Miller wrote:
>>
>>>It would be nice to figure out why CBQ isn't working for
>>>you, I know at least some folks have used it successfully
>>>at some point.
>>
>>Probably my own incompetence, I'll see if I can find some
>>scripts that are known to work.
>
>
> I'll complain if it stops working, I'm still using CBQ to
> keep rsync and other bulk flows from interfering with ssh.
> HTB just doesn't have the same homy touch :-)
:)
I managed to find a working script in the meantime and discovered
a bug in my changes; with this patch on top it seems to work fine.
[-- Attachment #2: x --]
[-- Type: text/plain, Size: 1055 bytes --]
[NET_SCHED]: sch_cbq: fix watchdog scheduled too late
q->now is increased during dequeue and doesn't contain the current time
afterwards, resulting in too large a timeout value for the qdisc watchdog.
Use "now" instead, which still contains the current time.
Signed-off-by: Patrick McHardy <kaber@trash.net>
---
commit b832b2f5f076cdd050b5b142dae10fcca874a4eb
tree 19b7dc63ace14cc4a5c0d6d6561487185aef8fdd
parent 9b85e807e5971a7ceff45ac450392909ea1c6954
author Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 13:12:17 +0100
committer Patrick McHardy <kaber@trash.net> Fri, 16 Mar 2007 13:12:17 +0100
net/sched/sch_cbq.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 0491fad..d83414d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1070,7 +1070,7 @@ cbq_dequeue(struct Qdisc *sch)
sch->qstats.overlimits++;
if (q->wd_expires)
qdisc_watchdog_schedule(&q->watchdog,
- q->now + q->wd_expires);
+ now + q->wd_expires);
}
return NULL;
}
* Re: [NET_SCHED 00/10]: ktime clocksource + hrtimer
2007-03-16 12:45 ` Patrick McHardy
@ 2007-03-16 19:31 ` David Miller
0 siblings, 0 replies; 16+ messages in thread
From: David Miller @ 2007-03-16 19:31 UTC (permalink / raw)
To: kaber; +Cc: tgraf, devik, netdev, shemminger
From: Patrick McHardy <kaber@trash.net>
Date: Fri, 16 Mar 2007 13:45:32 +0100
> Thomas Graf wrote:
> > * Patrick McHardy <kaber@trash.net> 2007-03-16 10:42
> >
> >>David Miller wrote:
> >>
> >>>It would be nice to figure out why CBQ isn't working for
> >>>you, I know at least some folks have used it successfully
> >>>at some point.
> >>
> >>Probably my own incompetence, I'll see if I can find some
> >>scripts that are known to work.
> >
> >
> > I'll complain if it stops working, I'm still using CBQ to
> > keep rsync and other bulk flows from interfering with ssh.
> > HTB just doesn't have the same homy touch :-)
>
> :)
>
> I managed to find a working script in the meantime and discovered
> a bug in my changes, with this patch on top it seems to works fine.
Thank a lot Patrick, patch applied.