public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [patch] Add a writer prior lock methord for rwlock
@ 2004-08-04  9:09 Liu Tao
  2004-08-04  9:16 ` Arjan van de Ven
  2004-08-04  9:45 ` Oliver Neukum
  0 siblings, 2 replies; 8+ messages in thread
From: Liu Tao @ 2004-08-04  9:09 UTC (permalink / raw)
  To: lkml

The patch adds the write_forcelock() method, which has higher priority than
read_lock() and write_lock(). The original read_lock() and write_lock()
are not
touched, and the unlock method is still write_unlock().
The patch gives an implementation for i386, for kernel 2.6.7.

Regards

-----------------------

diff -Naur linux-2.6.7/include/asm-i386/rwlock.h 
linux-2.6.7-dev/include/asm-i386/rwlock.h
--- linux-2.6.7/include/asm-i386/rwlock.h    2004-06-16 
13:20:04.000000000 +0800
+++ linux-2.6.7-dev/include/asm-i386/rwlock.h    2004-08-03 
09:15:02.000000000 +0800
@@ -80,4 +80,37 @@
                             __build_write_lock_ptr(rw, helper); \
                     } while (0)
 
+#define __build_write_forcelock_ptr(rw) \
+    asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+             "jnz 2f\n" \
+             "1:\n" \
+             LOCK_SECTION_START("") \
+             "2:\t" \
+             "rep;nop\n\t" \
+             "cmpl $0,(%0)\n\t" \
+             "jnz 2b\n\t" \
+             "jmp 1b\n" \
+             LOCK_SECTION_END \
+             ::"a" (rw) : "memory")
+
+#define __build_write_forcelock_const(rw) \
+    asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+             "jnz 2f\n" \
+             "1:\n" \
+             LOCK_SECTION_START("") \
+             "2:\t" \
+             "rep;nop\n\t" \
+             "cmpl $0,%0\n\t" \
+             "jnz 2b\n\t" \
+             "jmp 1b\n" \
+             LOCK_SECTION_END \
+             :"=m" (*(volatile int *)rw) : : "memory")
+
+#define __build_write_forcelock(rw)    do { \
+                        if (__builtin_constant_p(rw)) \
+                            __build_write_forcelock_const(rw); \
+                        else \
+                            __build_write_forcelock_ptr(rw); \
+                    } while (0)
+                             
 #endif
diff -Naur linux-2.6.7/include/asm-i386/spinlock.h 
linux-2.6.7-dev/include/asm-i386/spinlock.h
--- linux-2.6.7/include/asm-i386/spinlock.h    2004-06-16 
13:19:02.000000000 +0800
+++ linux-2.6.7-dev/include/asm-i386/spinlock.h    2004-08-02 
16:42:26.000000000 +0800
@@ -139,11 +139,13 @@
  */
 typedef struct {
     volatile unsigned int lock;
+    spinlock_t forcelock;
 #ifdef CONFIG_DEBUG_SPINLOCK
     unsigned magic;
 #endif
 } rwlock_t;
 
+#define RWLOCK_FORCELOCK    , {1 SPINLOCK_MAGIC_INIT}
 #define RWLOCK_MAGIC    0xdeaf1eed
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -152,7 +154,7 @@
 #define RWLOCK_MAGIC_INIT    /* */
 #endif
 
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_FORCELOCK 
RWLOCK_MAGIC_INIT }
 
 #define rwlock_init(x)    do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
@@ -185,6 +187,16 @@
     __build_write_lock(rw, "__write_lock_failed");
 }
 
+static inline void _raw_write_forcelock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+    BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+    _raw_spin_lock(&rw->forcelock);
+    __build_write_forcelock(rw);
+    _raw_spin_unlock(&rw->forcelock);
+}
+
 #define _raw_read_unlock(rw)        asm volatile("lock ; incl %0" :"=m" 
((rw)->lock) : : "memory")
 #define _raw_write_unlock(rw)    asm volatile("lock ; addl $" 
RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
 
diff -Naur linux-2.6.7/include/linux/spinlock.h 
linux-2.6.7-dev/include/linux/spinlock.h
--- linux-2.6.7/include/linux/spinlock.h    2004-06-16 
13:19:23.000000000 +0800
+++ linux-2.6.7-dev/include/linux/spinlock.h    2004-08-04 
16:25:37.000000000 +0800
@@ -231,6 +231,12 @@
 } while(0)
 #endif
 
+#define write_forcelock(lock) \
+do { \
+    preempt_disable(); \
+    _raw_write_forcelock(lock); \
+} while (0)
+
 #define read_lock(lock)    \
 do { \
     preempt_disable(); \
@@ -318,6 +324,27 @@
     _raw_write_lock(lock); \
 } while (0)
 
+#define write_forcelock_irqsave(lock, flags) \
+do { \
+    local_irq_save(flags); \
+    preempt_disable(); \
+    _raw_write_forcelock(lock); \
+} while (0)
+
+#define write_forcelock_irq(lock) \
+do { \
+    local_irq_disable(); \
+    preempt_disable(); \
+    _raw_write_forcelock(lock); \
+} while (0)
+
+#define write_forcelock_bh(lock) \
+do { \
+    local_bh_disable(); \
+    preempt_disable(); \
+    _raw_write_forcelock(lock); \
+} while (0)
+
 #define spin_unlock_irqrestore(lock, flags) \
 do { \
     _raw_spin_unlock(lock); \






^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2004-08-04 12:15 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2004-08-04  9:09 [patch] Add a writer prior lock methord for rwlock Liu Tao
2004-08-04  9:16 ` Arjan van de Ven
2004-08-04 10:29   ` Liu Tao
2004-08-04 10:33     ` William Lee Irwin III
2004-08-04 11:30       ` Liu Tao
2004-08-04 12:14     ` Rik van Riel
2004-08-04  9:45 ` Oliver Neukum
2004-08-04 10:30   ` Liu Tao

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox