* [PATCH] sched - Implement priority and fifo support for SCHED_ISO
@ 2005-01-26 9:47 Con Kolivas
2005-01-31 18:54 ` Jack O'Quin
0 siblings, 1 reply; 11+ messages in thread
From: Con Kolivas @ 2005-01-26 9:47 UTC (permalink / raw)
To: linux kernel
Cc: Andrew Morton, Ingo Molnar, Alexander Nyberg, Jack O'Quin,
Zwane Mwaikambo
[-- Attachment #1.1: Type: text/plain, Size: 241 bytes --]
While it is not clear what form the final soft real time implementation
will take, we should complete the partial implementation of SCHED_ISO
that is in 2.6.11-rc2-mm1.
Thanks to Alex Nyberg and Zwane Mwaikambo for debugging help.
Cheers,
Con
[-- Attachment #1.2: iso-add_prio-fifo.diff --]
[-- Type: text/x-diff, Size: 18059 bytes --]
This patch completes the implementation of the SCHED_ISO scheduling policy.
This splits the SCHED_ISO policy into two discrete policies which are the
unprivileged counterparts of SCHED_RR and SCHED_FIFO, calling them
SCHED_ISO_RR and SCHED_ISO_FIFO. When an unprivileged user calls for a real
time task they are downgraded to their SCHED_ISO counterparts.
This patch also adds full priority support to the isochronous scheduling
policies. Their range is the same size as the range available to
MAX_USER_RT_PRIO, but their effective priority is lower than that of any
privileged real time task.
The priorities as seen to userspace would appear as:
0 -> 39 SCHED_NORMAL
-100 -> -1 Isochronous
-200 -> -101 Real time
Signed-off-by: Con Kolivas <kernel@kolivas.org>
Index: linux-2.6.11-rc2-mm1/include/linux/sched.h
===================================================================
--- linux-2.6.11-rc2-mm1.orig/include/linux/sched.h 2005-01-25 20:35:05.000000000 +1100
+++ linux-2.6.11-rc2-mm1/include/linux/sched.h 2005-01-25 20:36:01.000000000 +1100
@@ -132,18 +132,26 @@ extern unsigned long nr_iowait(void);
#define SCHED_FIFO 1
#define SCHED_RR 2
/* policy 3 reserved for SCHED_BATCH */
-#define SCHED_ISO 4
+#define SCHED_ISO_RR 4
+#define SCHED_ISO_FIFO 5
extern int iso_cpu, iso_period;
#define SCHED_RANGE(policy) ((policy) == SCHED_NORMAL || \
(policy) == SCHED_FIFO || \
(policy) == SCHED_RR || \
- (policy) == SCHED_ISO)
+ (policy) == SCHED_ISO_RR || \
+ (policy) == SCHED_ISO_FIFO)
#define SCHED_RT(policy) ((policy) == SCHED_FIFO || \
(policy) == SCHED_RR)
+#define SCHED_ISO(policy) ((policy) == SCHED_ISO_RR || \
+ (policy) == SCHED_ISO_FIFO)
+
+/* The policies that support a real time priority setting */
+#define SCHED_RT_PRIO(policy) (SCHED_RT(policy) || SCHED_ISO(policy))
+
struct sched_param {
int sched_priority;
};
@@ -382,15 +390,21 @@ struct signal_struct {
* user-space. This allows kernel threads to set their
* priority to a value higher than any user task. Note:
* MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ *
+ * SCHED_ISO tasks have a rt priority of the same range as
+ * real time tasks. They are seen as having either a priority
+ * of ISO_PRIO if below starvation limits or their underlying
+ * equivalent SCHED_NORMAL priority if above.
*/
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define ISO_PRIO (MAX_RT_PRIO - 1)
#define MAX_PRIO (MAX_RT_PRIO + 40)
-#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
-#define iso_task(p) (unlikely((p)->policy == SCHED_ISO))
+#define rt_task(p) (unlikely((p)->prio < ISO_PRIO))
+#define iso_task(p) (unlikely(SCHED_ISO((p)->policy)))
/*
* Some day this will be a full-fledged user tracking system..
Index: linux-2.6.11-rc2-mm1/kernel/sched.c
===================================================================
--- linux-2.6.11-rc2-mm1.orig/kernel/sched.c 2005-01-25 20:35:05.000000000 +1100
+++ linux-2.6.11-rc2-mm1/kernel/sched.c 2005-01-25 23:25:06.000000000 +1100
@@ -184,6 +184,8 @@ int iso_period = 5; /* The time over whi
*/
#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
+#define ISO_BITMAP_SIZE ((((MAX_USER_RT_PRIO+1+7)/8)+sizeof(long)-1)/ \
+ sizeof(long))
typedef struct runqueue runqueue_t;
@@ -212,7 +214,9 @@ struct runqueue {
unsigned long cpu_load;
#endif
unsigned long iso_ticks;
- struct list_head iso_queue;
+ unsigned long iso_running;
+ unsigned long iso_bitmap[ISO_BITMAP_SIZE];
+ struct list_head iso_queue[MAX_USER_RT_PRIO];
int iso_refractory;
/*
* Refractory is the flag that we've hit the maximum iso cpu and are
@@ -312,15 +316,26 @@ static DEFINE_PER_CPU(struct runqueue, r
# define task_running(rq, p) ((rq)->curr == (p))
#endif
-static inline int task_preempts_curr(task_t *p, runqueue_t *rq)
+static int task_preempts_curr(task_t *p, runqueue_t *rq)
{
- if ((!iso_task(p) && !iso_task(rq->curr)) || rq->iso_refractory ||
- rt_task(p) || rt_task(rq->curr)) {
- if (p->prio < rq->curr->prio)
- return 1;
- return 0;
+ int p_prio = p->prio, curr_prio = rq->curr->prio;
+
+ if (!iso_task(p) && !iso_task(rq->curr))
+ goto out;
+ if (!rq->iso_refractory) {
+ if (iso_task(p)) {
+ if (iso_task(rq->curr)) {
+ p_prio = -p->rt_priority;
+ curr_prio = -rq->curr->rt_priority;
+ goto out;
+ }
+ p_prio = ISO_PRIO;
+ if (iso_task(rq->curr))
+ curr_prio = ISO_PRIO;
+ }
}
- if (iso_task(p) && !iso_task(rq->curr))
+out:
+ if (p_prio < curr_prio)
return 1;
return 0;
}
@@ -590,41 +605,43 @@ static inline void sched_info_switch(tas
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
-static inline int iso_queued(runqueue_t *rq)
-{
- return !list_empty(&rq->iso_queue);
-}
+/* We invert the ISO rt_priorities for queueing order */
+#define iso_prio(p) (ISO_PRIO - (p)->rt_priority)
-static inline void dequeue_iso_task(struct task_struct *p)
+static void dequeue_iso_task(struct task_struct *p, runqueue_t *rq)
{
+ rq->iso_running--;
list_del(&p->iso_list);
+ if (list_empty(rq->iso_queue + iso_prio(p)))
+ __clear_bit(iso_prio(p), rq->iso_bitmap);
}
/*
* Adding/removing a task to/from a priority array:
*/
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
+static void dequeue_task(struct task_struct *p, runqueue_t *rq, prio_array_t *array)
{
- array->nr_active--;
if (iso_task(p))
- dequeue_iso_task(p);
+ dequeue_iso_task(p, rq);
+ array->nr_active--;
list_del(&p->run_list);
if (list_empty(array->queue + p->prio))
__clear_bit(p->prio, array->bitmap);
}
/*
- * SCHED_ISO tasks are queued at both runqueues. Their actual priority is
+ * SCHED_ISO tasks are queued on both runqueues. Their actual priority is
* either better than SCHED_NORMAL if below starvation limits, or
- * the underlying SCHED_NORMAL dynamic priority.
+ * their underlying SCHED_NORMAL dynamic priority.
*/
-static void enqueue_iso_task(struct task_struct *p)
+static void enqueue_iso_task(struct task_struct *p, runqueue_t *rq)
{
- runqueue_t *rq = task_rq(p);
- list_add_tail(&p->iso_list, &rq->iso_queue);
+ list_add_tail(&p->iso_list, rq->iso_queue + iso_prio(p));
+ __set_bit(iso_prio(p), rq->iso_bitmap);
+ rq->iso_running++;
}
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
+static void enqueue_task(struct task_struct *p, runqueue_t *rq, prio_array_t *array)
{
sched_info_queued(p);
list_add_tail(&p->run_list, array->queue + p->prio);
@@ -632,24 +649,23 @@ static void enqueue_task(struct task_str
array->nr_active++;
p->array = array;
if (iso_task(p))
- enqueue_iso_task(p);
+ enqueue_iso_task(p, rq);
}
-static void requeue_iso_task(struct task_struct *p)
+static void requeue_iso_task(struct task_struct *p, runqueue_t *rq)
{
- runqueue_t *rq = task_rq(p);
- list_move_tail(&p->iso_list, &rq->iso_queue);
+ list_move_tail(&p->iso_list, rq->iso_queue + iso_prio(p));
}
/*
* Put task to the end of the run list without the overhead of dequeue
* followed by enqueue.
*/
-static void requeue_task(struct task_struct *p, prio_array_t *array)
+static void requeue_task(struct task_struct *p, runqueue_t *rq, prio_array_t *array)
{
list_move_tail(&p->run_list, array->queue + p->prio);
if (iso_task(p))
- requeue_iso_task(p);
+ requeue_iso_task(p, rq);
}
static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
@@ -696,7 +712,7 @@ static int effective_prio(task_t *p)
*/
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
- enqueue_task(p, rq->active);
+ enqueue_task(p, rq, rq->active);
rq->nr_running++;
}
@@ -825,7 +841,7 @@ static void activate_task(task_t *p, run
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
rq->nr_running--;
- dequeue_task(p, p->array);
+ dequeue_task(p, rq, p->array);
p->array = NULL;
}
@@ -1300,7 +1316,7 @@ void fastcall wake_up_new_task(task_t *
p->array->nr_active++;
rq->nr_running++;
if (iso_task(p))
- enqueue_iso_task(p);
+ enqueue_iso_task(p, rq);
}
set_need_resched();
} else
@@ -1689,11 +1705,11 @@ static inline
void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
- dequeue_task(p, src_array);
+ dequeue_task(p, src_rq, src_array);
src_rq->nr_running--;
set_task_cpu(p, this_cpu);
this_rq->nr_running++;
- enqueue_task(p, this_array);
+ enqueue_task(p, this_rq, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
/*
@@ -2497,42 +2513,29 @@ void scheduler_tick(void)
set_tsk_need_resched(p);
/* put it at the end of the queue: */
- requeue_task(p, rq->active);
+ requeue_task(p, rq, rq->active);
}
goto out_unlock;
}
if (iso_task(p)) {
- if (rq->iso_refractory) {
+ if (rq->iso_refractory)
/*
* If we are in the refractory period for SCHED_ISO
- * tasks we schedule them as SCHED_NORMAL. If their
- * priority is set to the ISO priority, change it.
+ * tasks we schedule them as SCHED_NORMAL.
*/
- if (p->prio == MAX_RT_PRIO) {
- dequeue_task(p, rq->active);
- p->prio = effective_prio(p);
- enqueue_task(p, rq->active);
- }
goto sched_normal;
- }
- if (p->prio > MAX_RT_PRIO) {
- dequeue_task(p, rq->active);
- p->prio = MAX_RT_PRIO;
- enqueue_task(p, rq->active);
- }
- if (!(--p->time_slice % GRANULARITY)) {
+ if (p->policy == SCHED_ISO_RR && !--p->time_slice) {
+ p->time_slice = task_timeslice(p);
set_tsk_need_resched(p);
- requeue_iso_task(p);
+ requeue_iso_task(p, rq);
}
- if (!p->time_slice)
- p->time_slice = task_timeslice(p);
goto out_unlock;
}
sched_normal:
if (!--p->time_slice) {
- dequeue_task(p, rq->active);
+ dequeue_task(p, rq, rq->active);
set_tsk_need_resched(p);
p->prio = effective_prio(p);
p->time_slice = task_timeslice(p);
@@ -2541,11 +2544,11 @@ sched_normal:
if (!rq->expired_timestamp)
rq->expired_timestamp = jiffies;
if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
- enqueue_task(p, rq->expired);
+ enqueue_task(p, rq, rq->expired);
if (p->static_prio < rq->best_expired_prio)
rq->best_expired_prio = p->static_prio;
} else
- enqueue_task(p, rq->active);
+ enqueue_task(p, rq, rq->active);
} else {
/*
* Prevent a too long timeslice allowing a task to monopolize
@@ -2568,7 +2571,7 @@ sched_normal:
(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
(p->array == rq->active)) {
- requeue_task(p, rq->active);
+ requeue_task(p, rq, rq->active);
set_tsk_need_resched(p);
}
}
@@ -2580,30 +2583,45 @@ out:
static inline int iso_ready(runqueue_t *rq)
{
- if (iso_queued(rq) && !rq->iso_refractory)
+ if (rq->iso_running && !rq->iso_refractory)
return 1;
return 0;
}
/*
- * When a SCHED_ISO task is ready to be scheduled, we re-queue it with an
- * effective prio of MAX_RT_PRIO for userspace to know its relative prio.
+ * When a SCHED_ISO task is ready to be scheduled, we ensure it is queued
+ * on the active array.
*/
-static task_t* queue_iso(runqueue_t *rq, prio_array_t *array)
+static task_t* find_iso(runqueue_t *rq, prio_array_t *array)
{
- task_t *p = list_entry(rq->iso_queue.next, task_t, iso_list);
- if (p->prio == MAX_RT_PRIO)
+ prio_array_t *old_array;
+ task_t *p;
+ int idx = find_first_bit(rq->iso_bitmap, MAX_USER_RT_PRIO);
+
+ p = list_entry(rq->iso_queue[idx].next, task_t, iso_list);
+ if (p->array == array)
goto out;
+ old_array = p->array;
+ old_array->nr_active--;
list_del(&p->run_list);
- if (list_empty(array->queue + p->prio))
- __clear_bit(p->prio, array->bitmap);
- p->prio = MAX_RT_PRIO;
+ if (list_empty(old_array->queue + p->prio))
+ __clear_bit(p->prio, old_array->bitmap);
list_add_tail(&p->run_list, array->queue + p->prio);
__set_bit(p->prio, array->bitmap);
-out:
+ array->nr_active++;
+ p->array = array;
+out:
return p;
}
+static inline task_t* find_next_task(runqueue_t *rq, prio_array_t *array)
+{
+ int idx = sched_find_first_bit(array->bitmap);
+ if (unlikely(iso_ready(rq) && idx > ISO_PRIO))
+ return find_iso(rq, array);
+ return list_entry(array->queue[idx].next, task_t, run_list);
+}
+
#ifdef CONFIG_SCHED_SMT
static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
{
@@ -2655,7 +2673,7 @@ static inline int dependent_sleeper(int
struct sched_domain *sd = this_rq->sd;
cpumask_t sibling_map;
prio_array_t *array;
- int ret = 0, i, idx;
+ int ret = 0, i;
task_t *p;
if (!(sd->flags & SD_SHARE_CPUPOWER))
@@ -2682,11 +2700,7 @@ static inline int dependent_sleeper(int
array = this_rq->expired;
BUG_ON(!array->nr_active);
- idx = sched_find_first_bit(array->bitmap);
- if (unlikely(iso_ready(this_rq) && idx >= MAX_RT_PRIO))
- p = queue_iso(this_rq, array);
- else
- p = list_entry(array->queue[idx].next, task_t, run_list);
+ p = find_next_task(this_rq, array);
for_each_cpu_mask(i, sibling_map) {
runqueue_t *smt_rq = cpu_rq(i);
@@ -2774,10 +2788,9 @@ asmlinkage void __sched schedule(void)
task_t *prev, *next;
runqueue_t *rq;
prio_array_t *array;
- struct list_head *queue;
unsigned long long now;
unsigned long run_time;
- int cpu, idx;
+ int cpu;
/*
* Test if we are atomic. Since do_exit() needs to call into
@@ -2885,13 +2898,7 @@ go_idle:
} else
schedstat_inc(rq, sched_noswitch);
- idx = sched_find_first_bit(array->bitmap);
- if (unlikely(iso_ready(rq) && idx >= MAX_RT_PRIO))
- next = queue_iso(rq, array);
- else {
- queue = array->queue + idx;
- next = list_entry(queue->next, task_t, run_list);
- }
+ next = find_next_task(rq, array);
if (!rt_task(next) && !(iso_task(next) && !rq->iso_refractory) &&
next->activated > 0) {
@@ -2901,9 +2908,9 @@ go_idle:
delta = delta *
(ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
array = next->array;
- dequeue_task(next, array);
+ dequeue_task(next, rq, array);
recalc_task_prio(next, next->timestamp + delta);
- enqueue_task(next, array);
+ enqueue_task(next, rq, array);
}
next->activated = 0;
switch_tasks:
@@ -3362,7 +3369,7 @@ void set_user_nice(task_t *p, long nice)
}
array = p->array;
if (array)
- dequeue_task(p, array);
+ dequeue_task(p, rq, array);
old_prio = p->prio;
new_prio = NICE_TO_PRIO(nice);
@@ -3371,7 +3378,7 @@ void set_user_nice(task_t *p, long nice)
p->prio += delta;
if (array) {
- enqueue_task(p, array);
+ enqueue_task(p, rq, array);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -3446,6 +3453,10 @@ asmlinkage long sys_nice(int increment)
*/
int task_prio(const task_t *p)
{
+ if (iso_task(p))
+ return -(p->rt_priority);
+ if (rt_task(p))
+ return -(MAX_RT_PRIO + p->rt_priority);
return p->prio - MAX_RT_PRIO;
}
@@ -3524,28 +3535,36 @@ int sched_setscheduler(struct task_struc
runqueue_t *rq;
recheck:
+ if (SCHED_RT(policy) && !capable(CAP_SYS_NICE)) {
+ /*
+ * If the caller requested an RT policy without having the
+ * necessary rights, we downgrade the policy to the
+ * SCHED_ISO equivalent.
+ */
+ if ((policy) == SCHED_RR)
+ policy = SCHED_ISO_RR;
+ else
+ policy = SCHED_ISO_FIFO;
+ }
+
/* double check policy once rq lock held */
if (policy < 0)
policy = oldpolicy = p->policy;
else if (!SCHED_RANGE(policy))
return -EINVAL;
/*
- * Valid priorities for SCHED_FIFO and SCHED_RR are
+ * Valid priorities for SCHED_FIFO, SCHED_RR and SCHED_ISO are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
*/
if (param->sched_priority < 0 ||
param->sched_priority > MAX_USER_RT_PRIO-1)
return -EINVAL;
- if ((!SCHED_RT(policy)) != (param->sched_priority == 0))
+ if (SCHED_RT(policy) && !capable(CAP_SYS_NICE))
+ return -EPERM;
+
+ if ((!SCHED_RT_PRIO(policy)) != (param->sched_priority == 0))
return -EINVAL;
- if (SCHED_RT(policy) && !capable(CAP_SYS_NICE))
- /*
- * If the caller requested an RT policy without having the
- * necessary rights, we downgrade the policy to SCHED_ISO.
- * Temporary hack for testing.
- */
- policy = SCHED_ISO;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
return -EPERM;
@@ -3862,13 +3881,13 @@ asmlinkage long sys_sched_yield(void)
schedstat_inc(rq, yld_exp_empty);
if (array != target) {
- dequeue_task(current, array);
- enqueue_task(current, target);
+ dequeue_task(current, rq, array);
+ enqueue_task(current, rq, target);
} else
/*
* requeue_task is cheaper so perform that if possible.
*/
- requeue_task(current, array);
+ requeue_task(current, rq, array);
/*
* Since we are going to call schedule() anyway, there's
@@ -4006,10 +4025,11 @@ asmlinkage long sys_sched_get_priority_m
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
+ case SCHED_ISO_RR:
+ case SCHED_ISO_FIFO:
ret = MAX_USER_RT_PRIO-1;
break;
case SCHED_NORMAL:
- case SCHED_ISO:
ret = 0;
break;
}
@@ -4030,10 +4050,11 @@ asmlinkage long sys_sched_get_priority_m
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
+ case SCHED_ISO_RR:
+ case SCHED_ISO_FIFO:
ret = 1;
break;
case SCHED_NORMAL:
- case SCHED_ISO:
ret = 0;
}
return ret;
@@ -5092,7 +5113,7 @@ void __init sched_init(void)
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
rq->best_expired_prio = MAX_PRIO;
- rq->iso_refractory = rq->iso_ticks = 0;
+ rq->iso_refractory = rq->iso_ticks = rq->iso_running = 0;
#ifdef CONFIG_SMP
rq->sd = &sched_domain_dummy;
@@ -5113,7 +5134,11 @@ void __init sched_init(void)
// delimiter for bitsearch
__set_bit(MAX_PRIO, array->bitmap);
}
- INIT_LIST_HEAD(&rq->iso_queue);
+ for (j = 0; j < MAX_USER_RT_PRIO; j++) {
+ INIT_LIST_HEAD(rq->iso_queue + j);
+ __clear_bit(j, rq->iso_bitmap);
+ }
+ __set_bit(MAX_USER_RT_PRIO, rq->iso_bitmap);
}
/*
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 256 bytes --]
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-26 9:47 [PATCH] sched - Implement priority and fifo support for SCHED_ISO Con Kolivas
@ 2005-01-31 18:54 ` Jack O'Quin
2005-01-31 20:15 ` Con Kolivas
2005-01-31 20:30 ` Con Kolivas
0 siblings, 2 replies; 11+ messages in thread
From: Jack O'Quin @ 2005-01-31 18:54 UTC (permalink / raw)
To: Con Kolivas
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Con Kolivas <kernel@kolivas.org> writes:
> While it is not clear what form the final soft real time
> implementation is, we should complete the partial implementation of
> SCHED_ISO that is in 2.6.11-rc2-mm1.
I finally had a chance to try this today. I applied a slightly
different patch (2.6.11-rc2-iso3.diff) on top of patch-2.6.11-rc2. I
tried to use 2.6.11-rc2-mm2, but could not due to conflicts with other
scheduler updates.
It is not clear whether the realtime threads are running in the new
scheduler class. Checking with schedtool yields odd results.
(Before, my old schedtool always said "POLICY I: SCHED_ISO".)
[joq@sulphur] jack_test/ $ pst jackd
2173 2173 TS - 0 19 0 0.0 SLs rt_sigsuspend jackd
2174 2174 ? 21 0 60 0 0.0 SL - jackd
2175 2175 TS - 0 23 0 0.0 SL rt_sigsuspend jackd
2176 2176 TS - 0 23 0 0.0 SL - jackd
2177 2177 ? 20 0 59 0 0.0 SL syscall_call jackd
2178 2178 ? 10 0 49 0 1.7 SL - jackd
[joq@sulphur] jack_test/ $ schedtool 2174 2176 2177 2178
PID 2174: PRIO 21, POLICY (null) , NICE 0
PID 2176: PRIO 0, POLICY N: SCHED_NORMAL, NICE 0
PID 2177: PRIO 20, POLICY (null) , NICE 0
PID 2178: PRIO 10, POLICY (null) , NICE 0
The results of the first run indicate something is badly wrong. It is
quite possible that I got confused and messed up the build somehow.
http://www.joq.us/jack/benchmarks/sched-iso3/jack_test3-2.6.11-rc2-q1-200501311225.log
http://www.joq.us/jack/benchmarks/sched-iso3/jack_test3-2.6.11-rc2-q1-200501311225.png
Loading the realtime-lsm and then running with SCHED_FIFO *does* work
as expected on this kernel. I should retry the test with *exactly*
the expected patch sequence. What would that be?
--
joq
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-31 18:54 ` Jack O'Quin
@ 2005-01-31 20:15 ` Con Kolivas
2005-01-31 20:30 ` Con Kolivas
1 sibling, 0 replies; 11+ messages in thread
From: Con Kolivas @ 2005-01-31 20:15 UTC (permalink / raw)
To: Jack O'Quin
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
[-- Attachment #1: Type: text/plain, Size: 2149 bytes --]
Jack O'Quin wrote:
> Con Kolivas <kernel@kolivas.org> writes:
>
>
>>While it is not clear what form the final soft real time
>>implementation is, we should complete the partial implementation of
>>SCHED_ISO that is in 2.6.11-rc2-mm1.
>
>
> I finally had a chance to try this today. I applied a slightly
> different patch (2.6.11-rc2-iso3.diff) on top of patch-2.6.11-rc2. I
> tried to use 2.6.11-rc2-mm2, but could not due to conflicts with other
> scheduler updates.
>
> It is not clear whether the realtime threads are running in the new
> scheduler class. Checking with schedtool yields odd results.
> (Before, my old schedtool always said "POLICY I: SCHED_ISO".)
>
> [joq@sulphur] jack_test/ $ pst jackd
> 2173 2173 TS - 0 19 0 0.0 SLs rt_sigsuspend jackd
> 2174 2174 ? 21 0 60 0 0.0 SL - jackd
> 2175 2175 TS - 0 23 0 0.0 SL rt_sigsuspend jackd
> 2176 2176 TS - 0 23 0 0.0 SL - jackd
> 2177 2177 ? 20 0 59 0 0.0 SL syscall_call jackd
> 2178 2178 ? 10 0 49 0 1.7 SL - jackd
> [joq@sulphur] jack_test/ $ schedtool 2174 2176 2177 2178
> PID 2174: PRIO 21, POLICY (null) , NICE 0
> PID 2176: PRIO 0, POLICY N: SCHED_NORMAL, NICE 0
> PID 2177: PRIO 20, POLICY (null) , NICE 0
> PID 2178: PRIO 10, POLICY (null) , NICE 0
They're SCHED_ISO_FIFO which schedtool doesn't know about.
> The results of the first run indicate something is badly wrong. It is
> quite possible that I got confused and messed up the build somehow.
>
> http://www.joq.us/jack/benchmarks/sched-iso3/jack_test3-2.6.11-rc2-q1-200501311225.log
> http://www.joq.us/jack/benchmarks/sched-iso3/jack_test3-2.6.11-rc2-q1-200501311225.png
>
> Loading the realtime-lsm and then running with SCHED_FIFO *does* work
> as expected on this kernel. I should retry the test with *exactly*
> the expected patch sequence. What would that be?
Shouldn't matter. There must still be something wrong with my code...
sigh. I'll look into it at some stage, but there doesn't seem to be much point.
Cheers,
Con
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 256 bytes --]
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-31 18:54 ` Jack O'Quin
2005-01-31 20:15 ` Con Kolivas
@ 2005-01-31 20:30 ` Con Kolivas
2005-01-31 21:04 ` Jack O'Quin
2005-01-31 22:51 ` Jack O'Quin
1 sibling, 2 replies; 11+ messages in thread
From: Con Kolivas @ 2005-01-31 20:30 UTC (permalink / raw)
To: Jack O'Quin
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
[-- Attachment #1.1: Type: text/plain, Size: 383 bytes --]
Jack O'Quin wrote:
> Loading the realtime-lsm and then running with SCHED_FIFO *does* work
> as expected on this kernel. I should retry the test with *exactly*
> the expected patch sequence. What would that be?
Sure enough I found the bug in less than 5 mins, and it would definitely
cause this terrible behaviour.
A silly bracket transposition error on my part :P
Cheers,
Con
[-- Attachment #1.2: iso_preempt_fix.diff --]
[-- Type: text/x-diff, Size: 472 bytes --]
Index: linux-2.6.11-rc2-iso/kernel/sched.c
===================================================================
--- linux-2.6.11-rc2-iso.orig/kernel/sched.c 2005-02-01 07:28:40.171079813 +1100
+++ linux-2.6.11-rc2-iso/kernel/sched.c 2005-02-01 07:29:21.332297160 +1100
@@ -326,9 +326,9 @@ static int task_preempts_curr(task_t *p,
goto out;
}
p_prio = ISO_PRIO;
+ }
if (iso_task(rq->curr))
curr_prio = ISO_PRIO;
- }
}
out:
if (p_prio < curr_prio)
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 256 bytes --]
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-31 20:30 ` Con Kolivas
@ 2005-01-31 21:04 ` Jack O'Quin
2005-01-31 22:51 ` Jack O'Quin
1 sibling, 0 replies; 11+ messages in thread
From: Jack O'Quin @ 2005-01-31 21:04 UTC (permalink / raw)
To: Con Kolivas
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Con Kolivas <kernel@kolivas.org> writes:
> Jack O'Quin wrote:
>> Loading the realtime-lsm and then running with SCHED_FIFO *does* work
>> as expected on this kernel. I should retry the test with *exactly*
>> the expected patch sequence. What would that be?
>
> Sure enough I found the bug in less than 5 mins, and it would
> definitely cause this terrible behaviour.
>
> A silly bracket transposition error on my part :P
Cool. I'll try that right away.
--
joq
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-31 20:30 ` Con Kolivas
2005-01-31 21:04 ` Jack O'Quin
@ 2005-01-31 22:51 ` Jack O'Quin
2005-01-31 23:01 ` Con Kolivas
1 sibling, 1 reply; 11+ messages in thread
From: Jack O'Quin @ 2005-01-31 22:51 UTC (permalink / raw)
To: Con Kolivas
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Con Kolivas <kernel@kolivas.org> writes:
> Sure enough I found the bug in less than 5 mins, and it would
> definitely cause this terrible behaviour.
>
> A silly bracket transposition error on my part :P
The corrected version works noticeably better, but still nowhere near
as well as SCHED_FIFO. The first run had a cluster of really bad
xruns. The second and third were much better, but still with numerous
small xruns.
http://www.joq.us/jack/benchmarks/sched-iso-fix/
With a compile running in the background it was a complete failure.
Some kind of big xrun storm triggered a collapse on every attempt.
http://www.joq.us/jack/benchmarks/sched-iso-fix+compile/
The summary statistics are mixed. The delay_max is noticeably better
than before, but still much worse than SCHED_FIFO. But, the xruns are
really bad news...
http://www.joq.us/jack/benchmarks/.SUMMARY
# sched-iso-fix
Delay Maximum . . . . . . . . : 33894 usecs
Delay Maximum . . . . . . . . : 745 usecs
Delay Maximum . . . . . . . . : 341 usecs
# sched-iso
Delay Maximum . . . . . . . . : 21410 usecs
Delay Maximum . . . . . . . . : 36830 usecs
Delay Maximum . . . . . . . . : 4062 usecs
# sched-fifo
Delay Maximum . . . . . . . . : 347 usecs
Delay Maximum . . . . . . . . : 277 usecs
Delay Maximum . . . . . . . . : 246 usecs
Delay Maximum . . . . . . . . : 199 usecs
Delay Maximum . . . . . . . . : 261 usecs
Delay Maximum . . . . . . . . : 305 usecs
# sched-iso-fix+compile
Delay Maximum . . . . . . . . : 14549 usecs
Delay Maximum . . . . . . . . : 38961 usecs
Delay Maximum . . . . . . . . : 26904 usecs
# sched-iso+compile
Delay Maximum . . . . . . . . : 98909 usecs
Delay Maximum . . . . . . . . : 39414 usecs
Delay Maximum . . . . . . . . : 40294 usecs
Delay Maximum . . . . . . . . : 217192 usecs
Delay Maximum . . . . . . . . : 156989 usecs
# sched-fifo+compile
Delay Maximum . . . . . . . . : 285 usecs
Delay Maximum . . . . . . . . : 269 usecs
Delay Maximum . . . . . . . . : 277 usecs
Delay Maximum . . . . . . . . : 569 usecs
Delay Maximum . . . . . . . . : 461 usecs
Delay Maximum . . . . . . . . : 405 usecs
Delay Maximum . . . . . . . . : 286 usecs
Delay Maximum . . . . . . . . : 579 usecs
# sched-iso-fix
XRUN Count . . . . . . . . . : 26
XRUN Count . . . . . . . . . : 24
XRUN Count . . . . . . . . . : 17
# sched-iso
XRUN Count . . . . . . . . . : 15
XRUN Count . . . . . . . . . : 17
XRUN Count . . . . . . . . . : 5
# sched-fifo
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
# sched-iso-fix+compile
XRUN Count . . . . . . . . . : 22
XRUN Count . . . . . . . . . : 44
XRUN Count . . . . . . . . . : 39
# sched-iso+compile
XRUN Count . . . . . . . . . : 44
XRUN Count . . . . . . . . . : 46
XRUN Count . . . . . . . . . : 45
XRUN Count . . . . . . . . . : 27
XRUN Count . . . . . . . . . : 101
# sched-fifo+compile
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 4
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
XRUN Count . . . . . . . . . : 0
--
joq
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-31 22:51 ` Jack O'Quin
@ 2005-01-31 23:01 ` Con Kolivas
2005-02-01 2:27 ` Jack O'Quin
0 siblings, 1 reply; 11+ messages in thread
From: Con Kolivas @ 2005-01-31 23:01 UTC (permalink / raw)
To: Jack O'Quin
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Jack O'Quin wrote:
> Con Kolivas <kernel@kolivas.org> writes:
>
>
>>Sure enough I found the bug in less than 5 mins, and it would
>>definitely cause this terrible behaviour.
>>
>>A silly bracket transposition error on my part :P
>
>
> The corrected version works noticeably better, but still nowhere near
> as well as SCHED_FIFO. The first run had a cluster of really bad
> xruns. The second and third were much better, but still with numerous
> small xruns.
>
> http://www.joq.us/jack/benchmarks/sched-iso-fix/
>
> With a compile running in the background it was a complete failure.
> Some kind of big xrun storm triggered a collapse on every attempt.
>
> http://www.joq.us/jack/benchmarks/sched-iso-fix+compile/
>
> The summary statistics are mixed. The delay_max is noticeably better
> than before, but still much worse than SCHED_FIFO. But, the xruns are
> really bad news...
Excellent.
Believe it or not these look like good results to me. Your XRUNS are
happening when the DSP load is >70% which is the iso_cpu % cutoff. Try
setting the iso_cpu to 90%
echo 90 > /proc/sys/kernel/iso_cpu
Cheers,
Con
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-01-31 23:01 ` Con Kolivas
@ 2005-02-01 2:27 ` Jack O'Quin
2005-02-01 2:46 ` Con Kolivas
0 siblings, 1 reply; 11+ messages in thread
From: Jack O'Quin @ 2005-02-01 2:27 UTC (permalink / raw)
To: Con Kolivas
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
> Jack O'Quin wrote:
>> The corrected version works noticeably better, but still nowhere near
>> as well as SCHED_FIFO. The first run had a cluster of really bad
>> xruns. The second and third were much better, but still with numerous
>> small xruns.
>>
>> http://www.joq.us/jack/benchmarks/sched-iso-fix/
>>
>> With a compile running in the background it was a complete failure.
>> Some kind of big xrun storm triggered a collapse on every attempt.
>>
>> http://www.joq.us/jack/benchmarks/sched-iso-fix+compile/
>>
>> The summary statistics are mixed. The delay_max is noticeably better
>> than before, but still much worse than SCHED_FIFO. But, the xruns are
>> really bad news...
Con Kolivas <kernel@kolivas.org> writes:
> Believe it or not these look like good results to me. Your XRUNS are
> happening when the DSP load is >70% which is the iso_cpu % cutoff. Try
> setting the iso_cpu to 90%
>
> echo 90 > /proc/sys/kernel/iso_cpu
I ran them again with that setting. But, don't forget the large
number of xruns before, even running without the compiles in the
background. There are still way too many of those, although the third
run was clean. If you can get them all to work like that, we'll
really have something.
http://www.joq.us/jack/benchmarks/sched-iso-fix.90
With a compile running in the background, the entire run did not fail
completely as it had at 70%. But there are still way too many xruns.
http://www.joq.us/jack/benchmarks/sched-iso-fix.90+compile
I moved a bunch of directories testing older prototypes to a .old
subdirectory, they no longer clutter up the summary statistics.
http://www.joq.us/jack/benchmarks/.SUMMARY
The fact that the results did improve with the 90% setting suggests
that there may be a bug in your throttling or time accounting. The
DSP load for this test should hover around 50% when things are working
properly. It should never hit a 70% limit, not even momentarily. The
background compile should not affect that, either.
Something seems to be causing scheduling delays when the sound card
interrupt causes jackd to become runnable. Ingo's nice(-20) patches
seem to have the same problem, but his RLIMIT_RT_CPU version does not.
This is still not working well enough for JACK users. Long and
variable trigger latencies after hardware interrupts are deadly to any
serious realtime application.
--
joq
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-02-01 2:27 ` Jack O'Quin
@ 2005-02-01 2:46 ` Con Kolivas
2005-02-01 4:44 ` Jack O'Quin
0 siblings, 1 reply; 11+ messages in thread
From: Con Kolivas @ 2005-02-01 2:46 UTC (permalink / raw)
To: Jack O'Quin
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Jack O'Quin wrote:
> The fact that the results did improve with the 90% setting suggests
> that there may be a bug in your throttling or time accounting. The
> DSP load for this test should hover around 50% when things are working
> properly. It should never hit a 70% limit, not even momentarily. The
> background compile should not affect that, either.
>
> Something seems to be causing scheduling delays when the sound card
> interrupt causes jackd to become runnable. Ingo's nice(-20) patches
> seem to have the same problem, but his RLIMIT_RT_CPU version does not.
Good work. Looks like you're probably right about the accounting. It may
be as simple as the fact that it is on the timer tick that we're getting
rescheduled and this ends up being accounted as more since the
accounting happens only at the scheduler tick. A test run setting
iso_cpu at 100% should tell you if it's accounting related - however the
RLIMIT_RT_CPU patch is accounted in a similar way so I'm not sure there
isn't another bug hanging around. I'm afraid on my hardware it has been
behaving just like SCHED_FIFO for some time which is why I've been
hanging on your results. You're not obliged to do anything (obviously),
but the 100% run should help discriminate where the problem is.
Since I've come this far with the code I'll have another look for any
other obvious bugs.
Cheers,
Con
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-02-01 2:46 ` Con Kolivas
@ 2005-02-01 4:44 ` Jack O'Quin
2005-02-01 4:56 ` Con Kolivas
0 siblings, 1 reply; 11+ messages in thread
From: Jack O'Quin @ 2005-02-01 4:44 UTC (permalink / raw)
To: Con Kolivas
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Con Kolivas <kernel@kolivas.org> writes:
> Good work. Looks like you're probably right about the accounting. It
> may be as simple as the fact that it is on the timer tick that we're
> getting rescheduled and this ends up being accounted as more since the
> accounting happens only at the scheduler tick. A test run setting
> iso_cpu at 100% should tell you if it's accounting related - however
> the RLIMIT_RT_CPU patch is accounted in a similar way so I'm not sure
> there isn't another bug hanging around.
> I'm afraid on my hardware it has been behaving just like SCHED_FIFO
> for some time which is why I've been hanging on your results.
My guess is that most of this test fits inside that huge cache of
yours, making it run much faster than on my system. You probably need
to increase the number of clients to get comparable results.
When you say just like SCHED_FIFO, do you mean completely clean? Or
are you still getting unexplained xruns? If that's the case, we need
to figure out why and eliminate them.
The reason I can measure an effect here is that the test is heavy
enough to stress my system and the system is RT-clean enough for
SCHED_FIFO to work properly. (That's no surprise, I've been running
it that way for years.)
> You're not obliged to do anything (obviously), but the 100% run
> should help discriminate where the problem is.
I don't mind. It's the main way I can help. I just get busy some of
the time.
It did work better. On the first run, there were a couple of real bad
xruns starting up. But, the other two runs look fairly clean.
http://www.joq.us/jack/benchmarks/sched-iso-fix.100
With a compile running, bad xruns and really long delays become a
serious problem again.
http://www.joq.us/jack/benchmarks/sched-iso-fix.100+compile
Comparing the summary statistics with the 90% run, suggests that the
same problems occur in both cases, but not as often at 100%.
http://www.joq.us/jack/benchmarks/.SUMMARY
With these latency demands, the system can't ever pick the wrong
thread on exit from even a single interrupt, or we're screwed. I am
pretty well convinced this is not happening reliably (except with
SCHED_FIFO).
--
joq
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH] sched - Implement priority and fifo support for SCHED_ISO
2005-02-01 4:44 ` Jack O'Quin
@ 2005-02-01 4:56 ` Con Kolivas
0 siblings, 0 replies; 11+ messages in thread
From: Con Kolivas @ 2005-02-01 4:56 UTC (permalink / raw)
To: Jack O'Quin
Cc: linux kernel, Andrew Morton, Ingo Molnar, Alexander Nyberg,
Zwane Mwaikambo
Jack O'Quin wrote:
> Con Kolivas <kernel@kolivas.org> writes:
>
>
>>Good work. Looks like you're probably right about the accounting. It
>>may be as simple as the fact that it is on the timer tick that we're
>>getting rescheduled and this ends up being accounted as more since the
>>accounting happens only at the scheduler tick. A test run setting
>>iso_cpu at 100% should tell you if it's accounting related - however
>>the RLIMIT_RT_CPU patch is accounted in a similar way so I'm not sure
>>there isn't another bug hanging around.
>
>
>>I'm afraid on my hardware it has been behaving just like SCHED_FIFO
>>for some time which is why I've been hanging on your results.
>
>
> My guess is that most of this test fits inside that huge cache of
> yours, making it run much faster than on my system. You probably need
> to increase the number of clients to get comparable results.
Bah — increasing the clients from 14 to 20, the script just fails in some
meaningless way:
Killed
[con@laptop jack_test4.1]$ [1/1] jack_test4_client (17/20) stopped.
[1/1] jack_test4_client (18/20) stopped.
./run.sh: line 153: 7504 Broken pipe ${CMD} >>${LOG} 2>&1
[1/1] jack_test4_client ( 2/20) stopped.
./run.sh: line 153: 7507 Broken pipe ${CMD} >>${LOG} 2>&1
even before it starts :(
>
> When you say just like SCHED_FIFO, do you mean completely clean? Or
> are you still getting unexplained xruns? If that's the case, we need
> to figure out why and eliminate them.
On my P4 with the results I posted I am getting no xruns whatsoever with
either SCHED_FIFO or ISO. As for the pentiumM I've given up trying to
use that for latency runs since even with everything shut down and the
file system with journal off running SCHED_FIFO I get 8ms peaks every 20
seconds. I'll keep blaming reiserfs for that one. Only dropping to
single user mode and unmounting filesystems can get rid of them.
> The reason I can measure an effect here is that the test is heavy
> enough to stress my system and the system is RT-clean enough for
> SCHED_FIFO to work properly. (That's no surprise, I've been running
> it that way for years.)
Yeah I understand. I'm kinda stuck with hardware that either doesn't
have a problem, or an installation too flawed to use.
> It did work better. On the first run, there were a couple of real bad
> xruns starting up. But, the other two runs look fairly clean.
>
> http://www.joq.us/jack/benchmarks/sched-iso-fix.100
>
> With a compile running, bad xruns and really long delays become a
> serious problem again.
>
> http://www.joq.us/jack/benchmarks/sched-iso-fix.100+compile
>
> Comparing the summary statistics with the 90% run, suggests that the
> same problems occur in both cases, but not as often at 100%.
>
> http://www.joq.us/jack/benchmarks/.SUMMARY
>
> With these latency demands, the system can't ever pick the wrong
> thread on exit from even a single interrupt, or we're screwed. I am
> pretty well convinced this is not happening reliably (except with
> SCHED_FIFO).
Looking at the code I see some bias towards keeping the cpu count too
high (it decays too slowly) but your results confirm a bigger problem
definitely exists. At 100% it should behave the same as SCHED_FIFO
without mlock, and it is not in your test. I simply need to look at my
code harder.
Cheers,
Con
^ permalink raw reply [flat|nested] 11+ messages in thread
end of thread, other threads:[~2005-02-01 4:55 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2005-01-26 9:47 [PATCH] sched - Implement priority and fifo support for SCHED_ISO Con Kolivas
2005-01-31 18:54 ` Jack O'Quin
2005-01-31 20:15 ` Con Kolivas
2005-01-31 20:30 ` Con Kolivas
2005-01-31 21:04 ` Jack O'Quin
2005-01-31 22:51 ` Jack O'Quin
2005-01-31 23:01 ` Con Kolivas
2005-02-01 2:27 ` Jack O'Quin
2005-02-01 2:46 ` Con Kolivas
2005-02-01 4:44 ` Jack O'Quin
2005-02-01 4:56 ` Con Kolivas
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox