public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [patch/revised] wake_up_info() ...
@ 2004-01-05 18:54 Davide Libenzi
  0 siblings, 0 replies; 6+ messages in thread
From: Davide Libenzi @ 2004-01-05 18:54 UTC (permalink / raw)
  To: Linux Kernel Mailing List; +Cc: Manfred Spraul, Linus Torvalds


Since Manfred and Linus preferred a simpler approach, now the info is 
passed only to the wake up callback, without any storage. This works right 
away for callback-driven wake ups like, for example, epoll. A task that 
wants a std wake up *and* wants to fetch wake up info, needs to initialize 
the wait structure with its own callback function, copy the info during 
the callback call, and wake itself up from inside the callback.
Comments?



- Davide




--- linux-2.5/fs/eventpoll.c._orig	2004-01-05 10:43:55.079273352 -0800
+++ linux-2.5/fs/eventpoll.c	2004-01-05 10:44:39.027592192 -0800
@@ -306,7 +306,8 @@
 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
 static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
 static int ep_remove(struct eventpoll *ep, struct epitem *epi);
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync);
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync,
+			    unsigned long info);
 static int ep_eventpoll_close(struct inode *inode, struct file *file);
 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
 static int ep_collect_ready_items(struct eventpoll *ep,
@@ -1293,7 +1294,8 @@
  * machanism. It is called by the stored file descriptors when they
  * have events to report.
  */
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync)
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync,
+			    unsigned long info)
 {
 	int pwake = 0;
 	unsigned long flags;
--- linux-2.5/include/linux/wait.h._orig	2004-01-05 09:22:33.802340240 -0800
+++ linux-2.5/include/linux/wait.h	2004-01-05 10:35:00.030613112 -0800
@@ -17,8 +17,10 @@
 #include <asm/system.h>
 
 typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
-extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync,
+				 unsigned long info);
+extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+				 unsigned long info);
 
 struct __wait_queue {
 	unsigned int flags;
@@ -107,6 +109,7 @@
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr, unsigned long info));
 
 #define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 #define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -117,6 +120,8 @@
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
 #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, (i))
+#define wake_up_all_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, (i))
 
 #define __wait_event(wq, condition) 					\
 do {									\
@@ -240,7 +245,8 @@
 void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
 				wait_queue_t *wait, int state));
 void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync);
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+			     unsigned long info);
 
 #define DEFINE_WAIT(name)						\
 	wait_queue_t name = {						\
--- linux-2.5/kernel/sched.c._orig	2004-01-05 09:22:34.609217576 -0800
+++ linux-2.5/kernel/sched.c	2004-01-05 10:26:56.606104808 -0800
@@ -1632,7 +1632,8 @@
 EXPORT_SYMBOL(preempt_schedule);
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+			  unsigned long info)
 {
 	task_t *p = curr->task;
 	return try_to_wake_up(p, mode, sync);
@@ -1649,7 +1650,8 @@
  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			     int nr_exclusive, int sync, unsigned long info)
 {
 	struct list_head *tmp, *next;
 
@@ -1658,7 +1660,7 @@
 		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		flags = curr->flags;
-		if (curr->func(curr, mode, sync) &&
+		if (curr->func(curr, mode, sync, info) &&
 		    (flags & WQ_FLAG_EXCLUSIVE) &&
 		    !--nr_exclusive)
 			break;
@@ -1676,7 +1678,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, 0, 0);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -1687,7 +1689,7 @@
  */
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
-	__wake_up_common(q, mode, 1, 0);
+	__wake_up_common(q, mode, 1, 0, 0);
 }
 
 /**
@@ -1712,21 +1714,41 @@
 
 	spin_lock_irqsave(&q->lock, flags);
 	if (likely(nr_exclusive))
-		__wake_up_common(q, mode, nr_exclusive, 1);
+		__wake_up_common(q, mode, nr_exclusive, 1, 0);
 	else
-		__wake_up_common(q, mode, nr_exclusive, 0);
+		__wake_up_common(q, mode, nr_exclusive, 0, 0);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+/**
+ * __wake_up_info - wake up threads blocked on a waitqueue by passing an information token.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @info: information token passed to waiters
+ */
+void __wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr_exclusive,
+		    unsigned long info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(__wake_up_info);
+
 void complete(struct completion *x)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 1, 0, 0);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
@@ -1738,7 +1760,8 @@
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 0, 0, 0);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
--- linux-2.5/kernel/fork.c._orig	2004-01-05 10:27:49.078127848 -0800
+++ linux-2.5/kernel/fork.c	2004-01-05 10:36:11.912685376 -0800
@@ -194,9 +194,10 @@
 
 EXPORT_SYMBOL(finish_wait);
 
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+			     unsigned long info)
 {
-	int ret = default_wake_function(wait, mode, sync);
+	int ret = default_wake_function(wait, mode, sync, info);
 
 	if (ret)
 		list_del_init(&wait->task_list);


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch/revised] wake_up_info() ...
       [not found] <fa.kf16nao.126qarq@ifi.uio.no>
@ 2004-01-05 19:28 ` John Gardiner Myers
  2004-01-05 19:33   ` Davide Libenzi
  2004-01-05 20:46   ` Linus Torvalds
  0 siblings, 2 replies; 6+ messages in thread
From: John Gardiner Myers @ 2004-01-05 19:28 UTC (permalink / raw)
  To: linux-kernel

It would seem better if info were a void *, to permit sending more than 
a single unsigned long.



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch/revised] wake_up_info() ...
  2004-01-05 19:28 ` [patch/revised] wake_up_info() John Gardiner Myers
@ 2004-01-05 19:33   ` Davide Libenzi
  2004-01-05 20:16     ` Davide Libenzi
  2004-01-05 20:46   ` Linus Torvalds
  1 sibling, 1 reply; 6+ messages in thread
From: Davide Libenzi @ 2004-01-05 19:33 UTC (permalink / raw)
  To: John Gardiner Myers; +Cc: linux-kernel

On Mon, 5 Jan 2004, John Gardiner Myers wrote:

> It would seem better if info were a void *, to permit sending more than 
> a single unsigned long.

It's fine for me. Linus, Manfred?



- Davide



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch/revised] wake_up_info() ...
  2004-01-05 19:33   ` Davide Libenzi
@ 2004-01-05 20:16     ` Davide Libenzi
  0 siblings, 0 replies; 6+ messages in thread
From: Davide Libenzi @ 2004-01-05 20:16 UTC (permalink / raw)
  To: John Gardiner Myers; +Cc: Linux Kernel Mailing List

On Mon, 5 Jan 2004, Davide Libenzi wrote:

> On Mon, 5 Jan 2004, John Gardiner Myers wrote:
> 
> > It would seem better if info were a void *, to permit sending more than 
> > a single unsigned long.
> 
> It's fine for me. Linus, Manfred?

This is the "void *" version. I slightly prefer the "void *" one.



- Davide




--- linux-2.5/fs/eventpoll.c._orig	2004-01-05 10:43:55.079273352 -0800
+++ linux-2.5/fs/eventpoll.c	2004-01-05 11:36:35.986742376 -0800
@@ -306,7 +306,8 @@
 static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
 static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
 static int ep_remove(struct eventpoll *ep, struct epitem *epi);
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync);
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync,
+			    void *info);
 static int ep_eventpoll_close(struct inode *inode, struct file *file);
 static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
 static int ep_collect_ready_items(struct eventpoll *ep,
@@ -1293,7 +1294,8 @@
  * machanism. It is called by the stored file descriptors when they
  * have events to report.
  */
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync)
+static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync,
+			    void *info)
 {
 	int pwake = 0;
 	unsigned long flags;
--- linux-2.5/include/linux/wait.h._orig	2004-01-05 09:22:33.802340240 -0800
+++ linux-2.5/include/linux/wait.h	2004-01-05 11:37:39.331112568 -0800
@@ -17,8 +17,10 @@
 #include <asm/system.h>
 
 typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync);
-extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync);
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync,
+				 void *info);
+extern int default_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+				 void *info);
 
 struct __wait_queue {
 	unsigned int flags;
@@ -107,6 +109,8 @@
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr,
+				    void *info));
 
 #define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 #define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
@@ -117,6 +121,8 @@
 #define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
 #define	wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
 #define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, (i))
+#define wake_up_all_info(x, i)		__wake_up_info((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, (i))
 
 #define __wait_event(wq, condition) 					\
 do {									\
@@ -240,7 +246,8 @@
 void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
 				wait_queue_t *wait, int state));
 void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync);
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+			     void *info);
 
 #define DEFINE_WAIT(name)						\
 	wait_queue_t name = {						\
--- linux-2.5/kernel/sched.c._orig	2004-01-05 09:22:34.609217576 -0800
+++ linux-2.5/kernel/sched.c	2004-01-05 11:40:03.893135800 -0800
@@ -1632,7 +1632,8 @@
 EXPORT_SYMBOL(preempt_schedule);
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+			  void *info)
 {
 	task_t *p = curr->task;
 	return try_to_wake_up(p, mode, sync);
@@ -1649,7 +1650,8 @@
  * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			     int nr_exclusive, int sync, void *info)
 {
 	struct list_head *tmp, *next;
 
@@ -1658,7 +1660,7 @@
 		unsigned flags;
 		curr = list_entry(tmp, wait_queue_t, task_list);
 		flags = curr->flags;
-		if (curr->func(curr, mode, sync) &&
+		if (curr->func(curr, mode, sync, info) &&
 		    (flags & WQ_FLAG_EXCLUSIVE) &&
 		    !--nr_exclusive)
 			break;
@@ -1676,7 +1678,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, 0);
+	__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -1687,7 +1689,7 @@
  */
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
-	__wake_up_common(q, mode, 1, 0);
+	__wake_up_common(q, mode, 1, 0, NULL);
 }
 
 /**
@@ -1712,21 +1714,41 @@
 
 	spin_lock_irqsave(&q->lock, flags);
 	if (likely(nr_exclusive))
-		__wake_up_common(q, mode, nr_exclusive, 1);
+		__wake_up_common(q, mode, nr_exclusive, 1, NULL);
 	else
-		__wake_up_common(q, mode, nr_exclusive, 0);
+		__wake_up_common(q, mode, nr_exclusive, 0, NULL);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+/**
+ * __wake_up_info - wake up threads blocked on a waitqueue by passing an information token.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @info: information token passed to waiters
+ */
+void __wake_up_info(wait_queue_head_t *q, unsigned int mode, int nr_exclusive,
+		    void *info)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, info);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+
+EXPORT_SYMBOL(__wake_up_info);
+
 void complete(struct completion *x)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
@@ -1738,7 +1760,8 @@
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
+	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+			 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
--- linux-2.5/kernel/fork.c._orig	2004-01-05 10:27:49.078127848 -0800
+++ linux-2.5/kernel/fork.c	2004-01-05 11:38:14.161817496 -0800
@@ -194,9 +194,10 @@
 
 EXPORT_SYMBOL(finish_wait);
 
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+			     void *info)
 {
-	int ret = default_wake_function(wait, mode, sync);
+	int ret = default_wake_function(wait, mode, sync, info);
 
 	if (ret)
 		list_del_init(&wait->task_list);


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch/revised] wake_up_info() ...
  2004-01-05 19:28 ` [patch/revised] wake_up_info() John Gardiner Myers
  2004-01-05 19:33   ` Davide Libenzi
@ 2004-01-05 20:46   ` Linus Torvalds
  2004-01-05 21:34     ` Davide Libenzi
  1 sibling, 1 reply; 6+ messages in thread
From: Linus Torvalds @ 2004-01-05 20:46 UTC (permalink / raw)
  To: John Gardiner Myers; +Cc: linux-kernel



On Mon, 5 Jan 2004, John Gardiner Myers wrote:
>
> It would seem better if info were a void *, to permit sending more than 
> a single unsigned long.

The argument against that is that since there is basically no 
synchronization here, you can't pass a pointer to some random object. So 
by default, you should think of the cookie as "pass-by-value", ie not a 
pointer. That way there are no liveness issues: there is no issue about 
what happens to the data when the recipient is actually scheduled 
(possibly _much_ much after the actual wakeup).

Also, the foreseeable actual use of this piece of data is purely integer:
things like the POLLIN | POLLOUT flags. There may never be any other use.

Also, passing in an "unsigned long" does not preclude using a data area
for more complex cases: if the users do their own synchronization around
the waitqueue on a higher level, and a pointer is a valid thing to use,
you can cast that "unsigned long" to a pointer. This is very common kernel
usage: it may not be "portable" in the theoretical sense, but it's deeply
embedded in the kernel that you can pass pointers as just bitpatterns that
fit in an "unsigned long".

The basic rule should be: don't build complex infrastructure. Build simple
infrastructure that you can build complexity on top of if you ever need
it. This was my reaction, and apparently Manfred Spraul reacted the same
way.

		Linus

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [patch/revised] wake_up_info() ...
  2004-01-05 20:46   ` Linus Torvalds
@ 2004-01-05 21:34     ` Davide Libenzi
  0 siblings, 0 replies; 6+ messages in thread
From: Davide Libenzi @ 2004-01-05 21:34 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: John Gardiner Myers, Linux Kernel Mailing List

On Mon, 5 Jan 2004, Linus Torvalds wrote:

> 
> 
> On Mon, 5 Jan 2004, John Gardiner Myers wrote:
> >
> > It would seem better if info were a void *, to permit sending more than 
> > a single unsigned long.
> 
> The argument against that is that since there is basically no 
> synchronization here, you can't pass a pointer to some random object. So 
> by default, you should think of the cookie as "pass-by-value", ie not a 
> pointer. That way there are no liveness issues: there is no issue about 
> what happens to the data when the recipient is actually scheduled 
> (possibly _much_ much after the actual wakeup).

An argument in favour of the "void *" would be that there might be 
situations where a little structure needs to be passed, that will be copied 
by the target of the wakeup (callback) into its own data structure. But as 
you said, we already do "unsigned long" -> "pointer" conversions inside 
the kernel, so it does not really matter. Let's stick with the unsigned 
long version then.



- Davide



^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2004-01-05 21:35 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
     [not found] <fa.kf16nao.126qarq@ifi.uio.no>
2004-01-05 19:28 ` [patch/revised] wake_up_info() John Gardiner Myers
2004-01-05 19:33   ` Davide Libenzi
2004-01-05 20:16     ` Davide Libenzi
2004-01-05 20:46   ` Linus Torvalds
2004-01-05 21:34     ` Davide Libenzi
2004-01-05 18:54 Davide Libenzi

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox