From: Dave Hansen <haveblue@us.ibm.com>
To: Matthew Wilcox <willy@debian.org>
Cc: kernel-janitor-discuss
<kernel-janitor-discuss@lists.sourceforge.net>,
linux-fsdevel@vger.kernel.org
Subject: Re: BKL removal
Date: Thu, 04 Jul 2002 11:56:12 -0700 [thread overview]
Message-ID: <3D249A4C.8050703@us.ibm.com> (raw)
In-Reply-To: 20020704134122.V27706@parcelfarce.linux.theplanet.co.uk
[-- Attachment #1: Type: text/plain, Size: 148 bytes --]
Just in case your telepathic patch reception isn't working, here's the
patch with added release-on-sleep fun.
--
Dave Hansen
haveblue@us.ibm.com
[-- Attachment #2: bkl_debug-2.5.24-12.patch --]
[-- Type: text/plain, Size: 18973 bytes --]
diff -ur linux-2.5.24-clean/Makefile linux-2.5.24-dirty/Makefile
--- linux-2.5.24-clean/Makefile Thu Jun 20 15:53:44 2002
+++ linux-2.5.24-dirty/Makefile Thu Jul 4 11:32:23 2002
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 24
-EXTRAVERSION =
+EXTRAVERSION =-bkldebug12
# We are using a recursive build, so we need to do a little thinking
# to get the ordering right.
diff -ur linux-2.5.24-clean/arch/i386/Config.help linux-2.5.24-dirty/arch/i386/Config.help
--- linux-2.5.24-clean/arch/i386/Config.help Thu Jun 20 15:53:44 2002
+++ linux-2.5.24-dirty/arch/i386/Config.help Thu Jul 4 11:32:07 2002
@@ -932,6 +932,15 @@
best used in conjunction with the NMI watchdog so that spinlock
deadlocks are also debuggable.
+CONFIG_DEBUG_BKL
+ Say Y here to get interesting information about the Big Kernel
+ Lock's use in dmesg.
+ Shows information on the following:
+ - nested holds of BKL
+ - releases in schedule (not yet implemented)
+ - use in interrupts (not yet implemented)
+ Send any interesting output to Dave Hansen <haveblue@us.ibm.com>
+
CONFIG_DEBUG_BUGVERBOSE
Say Y here to make BUG() panics output the file name and line number
of the BUG call as well as the EIP and oops trace. This aids
diff -ur linux-2.5.24-clean/arch/i386/config.in linux-2.5.24-dirty/arch/i386/config.in
--- linux-2.5.24-clean/arch/i386/config.in Thu Jun 20 15:53:49 2002
+++ linux-2.5.24-dirty/arch/i386/config.in Thu Jul 4 11:32:07 2002
@@ -416,6 +416,7 @@
bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
+ bool ' Big kernel lock (BKL,kernel_flag) debugging' CONFIG_DEBUG_BKL
if [ "$CONFIG_HIGHMEM" = "y" ]; then
bool ' Highmem debugging' CONFIG_DEBUG_HIGHMEM
fi
diff -ur linux-2.5.24-clean/include/asm-alpha/smplock.h linux-2.5.24-dirty/include/asm-alpha/smplock.h
--- linux-2.5.24-clean/include/asm-alpha/smplock.h Thu Jun 20 15:53:49 2002
+++ linux-2.5.24-dirty/include/asm-alpha/smplock.h Thu Jul 4 11:32:07 2002
@@ -39,13 +39,13 @@
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-static __inline__ void unlock_kernel(void)
+static __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-arm/smplock.h linux-2.5.24-dirty/include/asm-arm/smplock.h
--- linux-2.5.24-clean/include/asm-arm/smplock.h Thu Jun 20 15:53:43 2002
+++ linux-2.5.24-dirty/include/asm-arm/smplock.h Thu Jul 4 11:32:07 2002
@@ -41,7 +41,7 @@
* so we only need to worry about other
* CPU's.
*/
-static inline void lock_kernel(void)
+static inline void __lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
@@ -53,7 +53,7 @@
#endif
}
-static inline void unlock_kernel(void)
+static inline void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-cris/smp_lock.h linux-2.5.24-dirty/include/asm-cris/smp_lock.h
--- linux-2.5.24-clean/include/asm-cris/smp_lock.h Thu Jun 20 15:53:44 2002
+++ linux-2.5.24-dirty/include/asm-cris/smp_lock.h Thu Jul 4 11:32:07 2002
@@ -11,7 +11,7 @@
* Locking the kernel
*/
-extern __inline void lock_kernel(void)
+extern __inline void __lock_kernel(void)
{
unsigned long flags;
int proc = smp_processor_id();
@@ -49,7 +49,7 @@
restore_flags(flags);
}
-extern __inline void unlock_kernel(void)
+extern __inline void __unlock_kernel(void)
{
unsigned long flags;
save_flags(flags);
diff -ur linux-2.5.24-clean/include/asm-cris/smplock.h linux-2.5.24-dirty/include/asm-cris/smplock.h
--- linux-2.5.24-clean/include/asm-cris/smplock.h Thu Jun 20 15:53:52 2002
+++ linux-2.5.24-dirty/include/asm-cris/smplock.h Thu Jul 4 11:32:07 2002
@@ -11,8 +11,8 @@
#ifndef CONFIG_SMP
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
+#define __lock_kernel() do { } while(0)
+#define __unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
diff -ur linux-2.5.24-clean/include/asm-generic/smplock.h linux-2.5.24-dirty/include/asm-generic/smplock.h
--- linux-2.5.24-clean/include/asm-generic/smplock.h Thu Jun 20 15:53:43 2002
+++ linux-2.5.24-dirty/include/asm-generic/smplock.h Thu Jul 4 11:32:07 2002
@@ -38,13 +38,13 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-i386/smplock.h linux-2.5.24-dirty/include/asm-i386/smplock.h
--- linux-2.5.24-clean/include/asm-i386/smplock.h Thu Jun 20 15:53:49 2002
+++ linux-2.5.24-dirty/include/asm-i386/smplock.h Thu Jul 4 11:32:07 2002
@@ -54,7 +54,7 @@
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static __inline__ void __lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
@@ -76,7 +76,7 @@
#endif
}
-static __inline__ void unlock_kernel(void)
+static __inline__ void __unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
diff -ur linux-2.5.24-clean/include/asm-ia64/smplock.h linux-2.5.24-dirty/include/asm-ia64/smplock.h
--- linux-2.5.24-clean/include/asm-ia64/smplock.h Thu Jun 20 15:53:54 2002
+++ linux-2.5.24-dirty/include/asm-ia64/smplock.h Thu Jul 4 11:32:07 2002
@@ -51,14 +51,14 @@
* CPU's.
*/
static __inline__ void
-lock_kernel(void)
+__lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void
-unlock_kernel(void)
+__unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-m68k/smplock.h linux-2.5.24-dirty/include/asm-m68k/smplock.h
--- linux-2.5.24-clean/include/asm-m68k/smplock.h Thu Jun 20 15:53:48 2002
+++ linux-2.5.24-dirty/include/asm-m68k/smplock.h Thu Jul 4 11:32:07 2002
@@ -38,13 +38,13 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-mips/smplock.h linux-2.5.24-dirty/include/asm-mips/smplock.h
--- linux-2.5.24-clean/include/asm-mips/smplock.h Thu Jun 20 15:53:49 2002
+++ linux-2.5.24-dirty/include/asm-mips/smplock.h Thu Jul 4 11:32:07 2002
@@ -41,13 +41,13 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-mips64/smplock.h linux-2.5.24-dirty/include/asm-mips64/smplock.h
--- linux-2.5.24-clean/include/asm-mips64/smplock.h Thu Jun 20 15:53:49 2002
+++ linux-2.5.24-dirty/include/asm-mips64/smplock.h Thu Jul 4 11:32:07 2002
@@ -41,13 +41,13 @@
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-static __inline__ void unlock_kernel(void)
+static __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-parisc/smplock.h linux-2.5.24-dirty/include/asm-parisc/smplock.h
--- linux-2.5.24-clean/include/asm-parisc/smplock.h Thu Jun 20 15:53:43 2002
+++ linux-2.5.24-dirty/include/asm-parisc/smplock.h Thu Jul 4 11:32:07 2002
@@ -36,13 +36,13 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-ppc/smplock.h linux-2.5.24-dirty/include/asm-ppc/smplock.h
--- linux-2.5.24-clean/include/asm-ppc/smplock.h Thu Jun 20 15:53:48 2002
+++ linux-2.5.24-dirty/include/asm-ppc/smplock.h Thu Jul 4 11:32:07 2002
@@ -47,7 +47,7 @@
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static __inline__ void __lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
@@ -59,7 +59,7 @@
#endif /* CONFIG_PREEMPT */
}
-static __inline__ void unlock_kernel(void)
+static __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-ppc64/smplock.h linux-2.5.24-dirty/include/asm-ppc64/smplock.h
--- linux-2.5.24-clean/include/asm-ppc64/smplock.h Thu Jun 20 15:53:47 2002
+++ linux-2.5.24-dirty/include/asm-ppc64/smplock.h Thu Jul 4 11:32:07 2002
@@ -43,13 +43,13 @@
* so we only need to worry about other
* CPU's.
*/
-static __inline__ void lock_kernel(void)
+static __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-static __inline__ void unlock_kernel(void)
+static __inline__ void __unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
diff -ur linux-2.5.24-clean/include/asm-s390/smplock.h linux-2.5.24-dirty/include/asm-s390/smplock.h
--- linux-2.5.24-clean/include/asm-s390/smplock.h Thu Jun 20 15:53:55 2002
+++ linux-2.5.24-dirty/include/asm-s390/smplock.h Thu Jul 4 11:32:07 2002
@@ -48,13 +48,13 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-s390x/smplock.h linux-2.5.24-dirty/include/asm-s390x/smplock.h
--- linux-2.5.24-clean/include/asm-s390x/smplock.h Thu Jun 20 15:53:49 2002
+++ linux-2.5.24-dirty/include/asm-s390x/smplock.h Thu Jul 4 11:32:07 2002
@@ -48,13 +48,13 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
diff -ur linux-2.5.24-clean/include/asm-sh/smplock.h linux-2.5.24-dirty/include/asm-sh/smplock.h
--- linux-2.5.24-clean/include/asm-sh/smplock.h Thu Jun 20 15:53:46 2002
+++ linux-2.5.24-dirty/include/asm-sh/smplock.h Thu Jul 4 11:32:07 2002
@@ -11,8 +11,8 @@
#ifndef CONFIG_SMP
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
+#define __lock_kernel() do { } while(0)
+#define __unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
diff -ur linux-2.5.24-clean/include/asm-sparc/smplock.h linux-2.5.24-dirty/include/asm-sparc/smplock.h
--- linux-2.5.24-clean/include/asm-sparc/smplock.h Thu Jun 20 15:53:53 2002
+++ linux-2.5.24-dirty/include/asm-sparc/smplock.h Thu Jul 4 11:32:07 2002
@@ -42,13 +42,13 @@
* so we only need to worry about other
* CPU's.
*/
-#define lock_kernel() \
+#define __lock_kernel() \
do { \
if (!++current->lock_depth) \
spin_lock(&kernel_flag); \
} while(0)
-#define unlock_kernel() \
+#define __unlock_kernel() \
do { \
if (--current->lock_depth < 0) \
spin_unlock(&kernel_flag); \
diff -ur linux-2.5.24-clean/include/asm-sparc64/smplock.h linux-2.5.24-dirty/include/asm-sparc64/smplock.h
--- linux-2.5.24-clean/include/asm-sparc64/smplock.h Thu Jun 20 15:53:56 2002
+++ linux-2.5.24-dirty/include/asm-sparc64/smplock.h Thu Jul 4 11:32:07 2002
@@ -50,13 +50,13 @@
* so we only need to worry about other
* CPU's.
*/
-#define lock_kernel() \
+#define __lock_kernel() \
do { \
if (!++current->lock_depth) \
spin_lock(&kernel_flag); \
} while(0)
-#define unlock_kernel() \
+#define __unlock_kernel() \
do { \
if (--current->lock_depth < 0) \
spin_unlock(&kernel_flag); \
diff -ur linux-2.5.24-clean/include/asm-x86_64/smplock.h linux-2.5.24-dirty/include/asm-x86_64/smplock.h
--- linux-2.5.24-clean/include/asm-x86_64/smplock.h Thu Jun 20 15:53:51 2002
+++ linux-2.5.24-dirty/include/asm-x86_64/smplock.h Thu Jul 4 11:32:07 2002
@@ -54,7 +54,7 @@
* so we only need to worry about other
* CPU's.
*/
-extern __inline__ void lock_kernel(void)
+extern __inline__ void __lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
@@ -76,7 +76,7 @@
#endif
}
-extern __inline__ void unlock_kernel(void)
+extern __inline__ void __unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
diff -ur linux-2.5.24-clean/include/linux/sched.h linux-2.5.24-dirty/include/linux/sched.h
--- linux-2.5.24-clean/include/linux/sched.h Thu Jun 20 15:53:44 2002
+++ linux-2.5.24-dirty/include/linux/sched.h Thu Jul 4 11:32:07 2002
@@ -28,6 +28,7 @@
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
+#include <linux/spinlock.h>
struct exec_domain;
@@ -255,6 +256,8 @@
unsigned long ptrace;
int lock_depth; /* Lock depth */
+ struct lock_trace lt[MAX_LOCK_TRACE_DEPTH];
+ int lt_dirty; /* don't print redundant stuff */
int prio, static_prio;
list_t run_list;
diff -ur linux-2.5.24-clean/include/linux/smp_lock.h linux-2.5.24-dirty/include/linux/smp_lock.h
--- linux-2.5.24-clean/include/linux/smp_lock.h Thu Jun 20 15:53:48 2002
+++ linux-2.5.24-dirty/include/linux/smp_lock.h Thu Jul 4 11:32:07 2002
@@ -5,8 +5,8 @@
#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
+#define __lock_kernel() do { } while(0)
+#define __unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu) do { } while(0)
#define reacquire_kernel_lock(task) do { } while(0)
#define kernel_locked() 1
diff -ur linux-2.5.24-clean/include/linux/spinlock.h linux-2.5.24-dirty/include/linux/spinlock.h
--- linux-2.5.24-clean/include/linux/spinlock.h Thu Jun 20 15:53:52 2002
+++ linux-2.5.24-dirty/include/linux/spinlock.h Thu Jul 4 11:38:02 2002
@@ -197,6 +197,105 @@
#define write_trylock(lock) _raw_write_trylock(lock)
#endif
+#ifdef CONFIG_DEBUG_BKL
+/*
+ * This will increase size of task_struct by
+ * MAX_LOCK_RECURSION*sizeof(lock_trace)
+ *
+ * the longest filename that I can find is 28
+ * KBUILD_BASENAME is 2 shorter than that
+ * find -name '*.[ch]' | awk -F/ '{print length($(NF)), $NF}' | sort -n
+ */
+#define MAX_LOCK_TRACE_DEPTH 16
+struct lock_trace {
+ char func_name[32];
+ unsigned int line;
+};
+
+#define LT_ENTRY(i) (current->lt[(i)])
+#define LT_DIRTY (current->lt_dirty)
+#define BKL_DEPTH (current->lock_depth)
+#define CURR_LT_ENTRY (LT_ENTRY(current->lock_depth))
+#define LT_LABEL (__stringify(KBUILD_BASENAME))
+
+#define print_bkl_trace(MESSAGE) \
+do { \
+	int i; \
+	printk( MESSAGE ", depth: %d\n", BKL_DEPTH ); \
+	for( i=0; LT_DIRTY && i<BKL_DEPTH; i++ ) { \
+		printk( "[%2d]%s:%d\n", i, \
+			LT_ENTRY(i).func_name, \
+			LT_ENTRY(i).line ); \
+	} \
+} while (0)
+
+#define lock_kernel() \
+do { \
+ __lock_kernel(); \
+ if( in_irq() ) { \
+ printk( "BKL held in irq, %s:%s:%d\n", \
+ (__stringify(KBUILD_BASENAME)), \
+ __FUNCTION__, \
+ __LINE__ ); \
+ } else { \
+ strncpy(CURR_LT_ENTRY.func_name,LT_LABEL,32); \
+ CURR_LT_ENTRY.func_name[31] = '\0'; \
+ CURR_LT_ENTRY.line = __LINE__; \
+ current->lt_dirty = 1; \
+ } \
+} while (0)
+/*
+ * er == expect recursive hold, print if that isn't found
+ */
+#define er_lock_kernel() er_lock_kernel_c(1)
+#define er_lock_kernel_c(PRINT_COUNT) \
+do { \
+ static int prints_allowed = PRINT_COUNT; \
+	if( BKL_DEPTH == -1 && (prints_allowed-->0)) {\
+ printk( "BKL not held, %s:%s:%d\n", \
+ (__stringify(KBUILD_BASENAME)), \
+ __FUNCTION__, \
+ __LINE__ ); \
+ } \
+ lock_kernel(); \
+} while (0)
+
+/*
+ * default number of times to print, and allow overriding it
+ */
+#define unlock_kernel() unlock_kernel_c(1)
+
+#define unlock_kernel_c(PRINT_COUNT) \
+do { \
+ static int prints_allowed = PRINT_COUNT; \
+ \
+ if( !in_irq() && \
+ LT_DIRTY && BKL_DEPTH > 0 && (prints_allowed--) > 0 ) { \
+ print_bkl_trace( "release of recursive BKL hold" ); \
+ } \
+ if( !in_irq() ) \
+ LT_DIRTY = 0; \
+ __unlock_kernel(); \
+} while (0)
+#define unlock_kernel_quiet() \
+do { \
+ current->lt_dirty = 0; \
+ __unlock_kernel(); \
+} while(0)
+
+#else
+#define MAX_LOCK_TRACE_DEPTH 1
+struct lock_trace {};
+#define lock_kernel() __lock_kernel()
+#define lock_kernel_c(x) __lock_kernel()
+#define er_lock_kernel_c(x) __lock_kernel()
+#define er_lock_kernel() __lock_kernel()
+#define unlock_kernel() __unlock_kernel()
+#define unlock_kernel_c(x) __unlock_kernel()
+#define unlock_kernel_quiet() __unlock_kernel()
+#define print_bkl_trace(x) do {} while(0)
+#endif
+
/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
#include <asm/atomic.h>
diff -ur linux-2.5.24-clean/kernel/sched.c linux-2.5.24-dirty/kernel/sched.c
--- linux-2.5.24-clean/kernel/sched.c Thu Jun 20 15:53:47 2002
+++ linux-2.5.24-dirty/kernel/sched.c Thu Jul 4 11:32:07 2002
@@ -815,6 +815,9 @@
prev = current;
rq = this_rq();
+ if( kernel_locked() ) {
+ print_bkl_trace("BKL held during sleep");
+ }
release_kernel_lock(prev, smp_processor_id());
prepare_arch_schedule(prev);
prev->sleep_timestamp = jiffies;
prev parent reply other threads:[~2002-07-04 18:56 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <3D23F306.50703@us.ibm.com>
2002-07-04 12:41 ` BKL removal Matthew Wilcox
2002-07-04 18:44 ` Dave Hansen
2002-07-04 18:56 ` Dave Hansen [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=3D249A4C.8050703@us.ibm.com \
--to=haveblue@us.ibm.com \
--cc=kernel-janitor-discuss@lists.sourceforge.net \
--cc=linux-fsdevel@vger.kernel.org \
--cc=willy@debian.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox