* [PATCH -rt 1/8] introduce PICK_FUNCTION
@ 2007-08-28 21:37 Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 2/8] spinlocks/rwlocks: use PICK_FUNCTION() Daniel Walker
` (7 more replies)
0 siblings, 8 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users, Peter Zijlstra
[-- Attachment #1: pickop-rt_lock_h.patch --]
[-- Type: text/plain, Size: 9297 bytes --]
PICK_FUNCTION() is similar to the other PICK_OP style macros, and was
created to replace them all. I used variable argument macros to handle
PICK_FUNC_2ARG/PICK_FUNC_1ARG. Otherwise the macros are similar to the
original macros used for semaphores. The entire system is used to do a
compile time switch between two different locking APIs. For example,
real spinlocks (raw_spinlock_t) and mutexes (or sleeping spinlocks).
This new macro replaces all the duplication from lock type to lock type.
The result of this patch, and the next two, is a fairly nice simplification,
and consolidation. Although the seqlock changes are larger than the originals
I think overall the patchset is worthwhile.
Incorporated peterz's suggestion to not require TYPE_EQUAL() to only
use pointers.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
include/linux/pickop.h | 36 +++++++++++++
include/linux/rt_lock.h | 129 +++++++++++++++---------------------------------
2 files changed, 77 insertions(+), 88 deletions(-)
Index: linux-2.6.22/include/linux/pickop.h
===================================================================
--- /dev/null
+++ linux-2.6.22/include/linux/pickop.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_PICKOP_H
+#define _LINUX_PICKOP_H
+
+#undef TYPE_EQUAL
+#define TYPE_EQUAL(var, type) \
+ __builtin_types_compatible_p(typeof(var), type *)
+
+#undef PICK_TYPE_EQUAL
+#define PICK_TYPE_EQUAL(var, type) \
+ __builtin_types_compatible_p(typeof(var), type)
+
+extern int __bad_func_type(void);
+
+#define PICK_FUNCTION(type1, type2, func1, func2, arg0, ...) \
+do { \
+ if (PICK_TYPE_EQUAL((arg0), type1)) \
+ func1((type1)(arg0), ##__VA_ARGS__); \
+ else if (PICK_TYPE_EQUAL((arg0), type2)) \
+ func2((type2)(arg0), ##__VA_ARGS__); \
+ else __bad_func_type(); \
+} while (0)
+
+#define PICK_FUNCTION_RET(type1, type2, func1, func2, arg0, ...) \
+({ \
+ unsigned long __ret; \
+ \
+ if (PICK_TYPE_EQUAL((arg0), type1)) \
+ __ret = func1((type1)(arg0), ##__VA_ARGS__); \
+ else if (PICK_TYPE_EQUAL((arg0), type2)) \
+ __ret = func2((type2)(arg0), ##__VA_ARGS__); \
+ else __ret = __bad_func_type(); \
+ \
+ __ret; \
+})
+
+#endif /* _LINUX_PICKOP_H */
Index: linux-2.6.22/include/linux/rt_lock.h
===================================================================
--- linux-2.6.22.orig/include/linux/rt_lock.h
+++ linux-2.6.22/include/linux/rt_lock.h
@@ -156,76 +156,40 @@ extern void fastcall rt_up(struct semaph
extern int __bad_func_type(void);
-#undef TYPE_EQUAL
-#define TYPE_EQUAL(var, type) \
- __builtin_types_compatible_p(typeof(var), type *)
-
-#define PICK_FUNC_1ARG(type1, type2, func1, func2, arg) \
-do { \
- if (TYPE_EQUAL((arg), type1)) \
- func1((type1 *)(arg)); \
- else if (TYPE_EQUAL((arg), type2)) \
- func2((type2 *)(arg)); \
- else __bad_func_type(); \
-} while (0)
+#include <linux/pickop.h>
-#define PICK_FUNC_1ARG_RET(type1, type2, func1, func2, arg) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((arg), type1)) \
- __ret = func1((type1 *)(arg)); \
- else if (TYPE_EQUAL((arg), type2)) \
- __ret = func2((type2 *)(arg)); \
- else __ret = __bad_func_type(); \
- \
- __ret; \
-})
-
-#define PICK_FUNC_2ARG(type1, type2, func1, func2, arg0, arg1) \
-do { \
- if (TYPE_EQUAL((arg0), type1)) \
- func1((type1 *)(arg0), arg1); \
- else if (TYPE_EQUAL((arg0), type2)) \
- func2((type2 *)(arg0), arg1); \
- else __bad_func_type(); \
-} while (0)
+/*
+ * PICK_SEM_OP() is a small redirector to allow less typing of the lock
+ * types struct compat_semaphore, struct semaphore, at the front of the
+ * PICK_FUNCTION macro.
+ */
+#define PICK_SEM_OP(...) PICK_FUNCTION(struct compat_semaphore *, \
+ struct semaphore *, ##__VA_ARGS__)
+#define PICK_SEM_OP_RET(...) PICK_FUNCTION_RET(struct compat_semaphore *,\
+ struct semaphore *, ##__VA_ARGS__)
#define sema_init(sem, val) \
- PICK_FUNC_2ARG(struct compat_semaphore, struct semaphore, \
- compat_sema_init, rt_sema_init, sem, val)
+ PICK_SEM_OP(compat_sema_init, rt_sema_init, sem, val)
-#define init_MUTEX(sem) \
- PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \
- compat_init_MUTEX, rt_init_MUTEX, sem)
+#define init_MUTEX(sem) PICK_SEM_OP(compat_init_MUTEX, rt_init_MUTEX, sem)
#define init_MUTEX_LOCKED(sem) \
- PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \
- compat_init_MUTEX_LOCKED, rt_init_MUTEX_LOCKED, sem)
+ PICK_SEM_OP(compat_init_MUTEX_LOCKED, rt_init_MUTEX_LOCKED, sem)
-#define down(sem) \
- PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \
- compat_down, rt_down, sem)
+#define down(sem) PICK_SEM_OP(compat_down, rt_down, sem)
#define down_interruptible(sem) \
- PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \
- compat_down_interruptible, rt_down_interruptible, sem)
+ PICK_SEM_OP_RET(compat_down_interruptible, rt_down_interruptible, sem)
#define down_trylock(sem) \
- PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \
- compat_down_trylock, rt_down_trylock, sem)
+ PICK_SEM_OP_RET(compat_down_trylock, rt_down_trylock, sem)
-#define up(sem) \
- PICK_FUNC_1ARG(struct compat_semaphore, struct semaphore, \
- compat_up, rt_up, sem)
+#define up(sem) PICK_SEM_OP(compat_up, rt_up, sem)
#define sem_is_locked(sem) \
- PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \
- compat_sem_is_locked, rt_sem_is_locked, sem)
+ PICK_SEM_OP_RET(compat_sem_is_locked, rt_sem_is_locked, sem)
-#define sema_count(sem) \
- PICK_FUNC_1ARG_RET(struct compat_semaphore, struct semaphore, \
- compat_sema_count, rt_sema_count, sem)
+#define sema_count(sem) PICK_SEM_OP_RET(compat_sema_count, rt_sema_count, sem)
/*
* rwsems:
@@ -272,58 +236,47 @@ extern void fastcall rt_downgrade_write(
# define rt_rwsem_is_locked(rws) (rt_mutex_is_locked(&(rws)->lock))
-#define init_rwsem(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_init_rwsem, rt_init_rwsem, rwsem)
-
-#define down_read(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_read, rt_down_read, rwsem)
+#define PICK_RWSEM_OP(...) PICK_FUNCTION(struct compat_rw_semaphore *, \
+ struct rw_semaphore *, ##__VA_ARGS__)
+#define PICK_RWSEM_OP_RET(...) PICK_FUNCTION_RET(struct compat_rw_semaphore *,\
+ struct rw_semaphore *, ##__VA_ARGS__)
+
+#define init_rwsem(rwsem) PICK_RWSEM_OP(compat_init_rwsem, rt_init_rwsem, rwsem)
+
+#define down_read(rwsem) PICK_RWSEM_OP(compat_down_read, rt_down_read, rwsem)
#define down_read_non_owner(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_read_non_owner, rt_down_read_non_owner, rwsem)
+ PICK_RWSEM_OP(compat_down_read_non_owner, rt_down_read_non_owner, rwsem)
#define down_read_trylock(rwsem) \
- PICK_FUNC_1ARG_RET(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_read_trylock, rt_down_read_trylock, rwsem)
+ PICK_RWSEM_OP_RET(compat_down_read_trylock, rt_down_read_trylock, rwsem)
-#define down_write(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_write, rt_down_write, rwsem)
+#define down_write(rwsem) PICK_RWSEM_OP(compat_down_write, rt_down_write, rwsem)
#define down_read_nested(rwsem, subclass) \
- PICK_FUNC_2ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_read_nested, rt_down_read_nested, rwsem, subclass)
-
+ PICK_RWSEM_OP(compat_down_read_nested, rt_down_read_nested, \
+ rwsem, subclass)
#define down_write_nested(rwsem, subclass) \
- PICK_FUNC_2ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_write_nested, rt_down_write_nested, rwsem, subclass)
+ PICK_RWSEM_OP(compat_down_write_nested, rt_down_write_nested, \
+ rwsem, subclass)
#define down_write_trylock(rwsem) \
- PICK_FUNC_1ARG_RET(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_down_write_trylock, rt_down_write_trylock, rwsem)
+ PICK_RWSEM_OP_RET(compat_down_write_trylock, rt_down_write_trylock,\
+ rwsem)
-#define up_read(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_up_read, rt_up_read, rwsem)
+#define up_read(rwsem) PICK_RWSEM_OP(compat_up_read, rt_up_read, rwsem)
#define up_read_non_owner(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_up_read_non_owner, rt_up_read_non_owner, rwsem)
+ PICK_RWSEM_OP(compat_up_read_non_owner, rt_up_read_non_owner, rwsem)
-#define up_write(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_up_write, rt_up_write, rwsem)
+#define up_write(rwsem) PICK_RWSEM_OP(compat_up_write, rt_up_write, rwsem)
#define downgrade_write(rwsem) \
- PICK_FUNC_1ARG(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_downgrade_write, rt_downgrade_write, rwsem)
+ PICK_RWSEM_OP(compat_downgrade_write, rt_downgrade_write, rwsem)
#define rwsem_is_locked(rwsem) \
- PICK_FUNC_1ARG_RET(struct compat_rw_semaphore, struct rw_semaphore, \
- compat_rwsem_is_locked, rt_rwsem_is_locked, rwsem)
+ PICK_RWSEM_OP_RET(compat_rwsem_is_locked, rt_rwsem_is_locked, rwsem)
#endif /* CONFIG_PREEMPT_RT */
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 2/8] spinlocks/rwlocks: use PICK_FUNCTION()
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 3/8] seqlocks: use PICK_FUNCTION Daniel Walker
` (6 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users
[-- Attachment #1: pickop-spinlock-rwlocks.patch --]
[-- Type: text/plain, Size: 20153 bytes --]
Replace the old PICK_OP style macros with the new PICK_FUNCTION macro.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
include/linux/sched.h | 13 -
include/linux/spinlock.h | 345 ++++++++++++++---------------------------------
kernel/rtmutex.c | 2
lib/dec_and_lock.c | 2
4 files changed, 111 insertions(+), 251 deletions(-)
Index: linux-2.6.22/include/linux/sched.h
===================================================================
--- linux-2.6.22.orig/include/linux/sched.h
+++ linux-2.6.22/include/linux/sched.h
@@ -2022,17 +2022,8 @@ extern int __cond_resched_raw_spinlock(r
extern int __cond_resched_spinlock(spinlock_t *spinlock);
#define cond_resched_lock(lock) \
-({ \
- int __ret; \
- \
- if (TYPE_EQUAL((lock), raw_spinlock_t)) \
- __ret = __cond_resched_raw_spinlock((raw_spinlock_t *)lock);\
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- __ret = __cond_resched_spinlock((spinlock_t *)lock); \
- else __ret = __bad_spinlock_type(); \
- \
- __ret; \
-})
+ PICK_SPIN_OP_RET(__cond_resched_raw_spinlock, __cond_resched_spinlock,\
+ lock)
extern int cond_resched_softirq(void);
extern int cond_resched_softirq_context(void);
Index: linux-2.6.22/include/linux/spinlock.h
===================================================================
--- linux-2.6.22.orig/include/linux/spinlock.h
+++ linux-2.6.22/include/linux/spinlock.h
@@ -91,6 +91,7 @@
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/irqflags.h>
+#include <linux/pickop.h>
#include <asm/system.h>
@@ -162,7 +163,7 @@ extern void __lockfunc rt_spin_unlock_wa
extern int __lockfunc
rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
-extern int _atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+extern int _atomic_dec_and_spin_lock(spinlock_t *lock, atomic_t *atomic);
/*
* lockdep-less calls, for derived types like rwlock:
@@ -243,54 +244,6 @@ do { \
# define _spin_trylock_irqsave(l,f) TSNBCONRT(l)
#endif
-#undef TYPE_EQUAL
-#define TYPE_EQUAL(lock, type) \
- __builtin_types_compatible_p(typeof(lock), type *)
-
-#define PICK_OP(op, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_spinlock_t)) \
- __spin##op((raw_spinlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- _spin##op((spinlock_t *)(lock)); \
- else __bad_spinlock_type(); \
-} while (0)
-
-#define PICK_OP_RET(op, lock...) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_spinlock_t)) \
- __ret = __spin##op((raw_spinlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- __ret = _spin##op((spinlock_t *)(lock)); \
- else __ret = __bad_spinlock_type(); \
- \
- __ret; \
-})
-
-#define PICK_OP2(op, lock, flags) \
-do { \
- if (TYPE_EQUAL((lock), raw_spinlock_t)) \
- __spin##op((raw_spinlock_t *)(lock), flags); \
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- _spin##op((spinlock_t *)(lock), flags); \
- else __bad_spinlock_type(); \
-} while (0)
-
-#define PICK_OP2_RET(op, lock, flags) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_spinlock_t)) \
- __ret = __spin##op((raw_spinlock_t *)(lock), flags); \
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- __ret = _spin##op((spinlock_t *)(lock), flags); \
- else __bad_spinlock_type(); \
- \
- __ret; \
-})
-
extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
@@ -349,76 +302,10 @@ do { \
# define _read_unlock_irqrestore(rwl, f) rt_read_unlock(rwl)
# define _write_unlock_irqrestore(rwl, f) rt_write_unlock(rwl)
-#define __PICK_RW_OP(optype, op, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- __##optype##op((raw_rwlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- ##op((rwlock_t *)(lock)); \
- else __bad_rwlock_type(); \
-} while (0)
-
-#define PICK_RW_OP(optype, op, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- __##optype##op((raw_rwlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- _##optype##op((rwlock_t *)(lock)); \
- else __bad_rwlock_type(); \
-} while (0)
-
-#define __PICK_RW_OP_RET(optype, op, lock...) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- __ret = __##optype##op((raw_rwlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- __ret = _##optype##op((rwlock_t *)(lock)); \
- else __ret = __bad_rwlock_type(); \
- \
- __ret; \
-})
-
-#define PICK_RW_OP_RET(optype, op, lock...) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- __ret = __##optype##op((raw_rwlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- __ret = _##optype##op((rwlock_t *)(lock)); \
- else __ret = __bad_rwlock_type(); \
- \
- __ret; \
-})
-
-#define PICK_RW_OP2(optype, op, lock, flags) \
-do { \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- __##optype##op((raw_rwlock_t *)(lock), flags); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- _##optype##op((rwlock_t *)(lock), flags); \
- else __bad_rwlock_type(); \
-} while (0)
-
-#define PICK_RW_OP2_RET(optype, op, lock, flags) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- __ret = __##optype##op((raw_rwlock_t *)(lock), flags); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- __ret = _##optype##op((rwlock_t *)(lock), flags); \
- else __bad_rwlock_type(); \
- \
- __ret; \
-})
-
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key);
-# define _raw_spin_lock_init(lock) \
+# define _raw_spin_lock_init(lock, name, file, line) \
do { \
static struct lock_class_key __key; \
\
@@ -428,25 +315,28 @@ do { \
#else
#define __raw_spin_lock_init(lock) \
do { *(lock) = RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
-# define _raw_spin_lock_init(lock) __raw_spin_lock_init(lock)
+# define _raw_spin_lock_init(lock, name, file, line) __raw_spin_lock_init(lock)
#endif
-#define PICK_OP_INIT(op, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_spinlock_t)) \
- _raw_spin##op((raw_spinlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- _spin##op((spinlock_t *)(lock), #lock, __FILE__, __LINE__); \
- else __bad_spinlock_type(); \
-} while (0)
-
+/*
+ * PICK_SPIN_OP()/PICK_RW_OP() are simple redirectors for PICK_FUNCTION
+ */
+#define PICK_SPIN_OP(...) \
+ PICK_FUNCTION(raw_spinlock_t *, spinlock_t *, ##__VA_ARGS__)
+#define PICK_SPIN_OP_RET(...) \
+ PICK_FUNCTION_RET(raw_spinlock_t *, spinlock_t *, ##__VA_ARGS__)
+#define PICK_RW_OP(...) PICK_FUNCTION(raw_rwlock_t *, rwlock_t *, ##__VA_ARGS__)
+#define PICK_RW_OP_RET(...) \
+ PICK_FUNCTION_RET(raw_rwlock_t *, rwlock_t *, ##__VA_ARGS__)
-#define spin_lock_init(lock) PICK_OP_INIT(_lock_init, lock)
+#define spin_lock_init(lock) \
+ PICK_SPIN_OP(_raw_spin_lock_init, _spin_lock_init, lock, #lock, \
+ __FILE__, __LINE__)
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_rwlock_init(raw_rwlock_t *lock, const char *name,
struct lock_class_key *key);
-# define _raw_rwlock_init(lock) \
+# define _raw_rwlock_init(lock, name, file, line) \
do { \
static struct lock_class_key __key; \
\
@@ -455,83 +345,82 @@ do { \
#else
#define __raw_rwlock_init(lock) \
do { *(lock) = RAW_RW_LOCK_UNLOCKED(lock); } while (0)
-# define _raw_rwlock_init(lock) __raw_rwlock_init(lock)
+# define _raw_rwlock_init(lock, name, file, line) __raw_rwlock_init(lock)
#endif
-#define __PICK_RW_OP_INIT(optype, op, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_rwlock_t)) \
- _raw_##optype##op((raw_rwlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, rwlock_t)) \
- _##optype##op((rwlock_t *)(lock), #lock, __FILE__, __LINE__);\
- else __bad_spinlock_type(); \
-} while (0)
-
-#define rwlock_init(lock) __PICK_RW_OP_INIT(rwlock, _init, lock)
+#define rwlock_init(lock) \
+ PICK_RW_OP(_raw_rwlock_init, _rwlock_init, lock, #lock, \
+ __FILE__, __LINE__)
#define __spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
-#define spin_is_locked(lock) PICK_OP_RET(_is_locked, lock)
+#define spin_is_locked(lock) \
+ PICK_SPIN_OP_RET(__spin_is_locked, _spin_is_locked, lock)
#define __spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
-#define spin_unlock_wait(lock) PICK_OP(_unlock_wait, lock)
+#define spin_unlock_wait(lock) \
+ PICK_SPIN_OP(__spin_unlock_wait, _spin_unlock_wait, lock)
+
/*
* Define the various spin_lock and rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
-// #define spin_trylock(lock) _spin_trylock(lock)
-#define spin_trylock(lock) __cond_lock(lock, PICK_OP_RET(_trylock, lock))
+#define spin_trylock(lock) \
+ __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock, _spin_trylock, lock))
-//#define read_trylock(lock) _read_trylock(lock)
-#define read_trylock(lock) __cond_lock(lock, PICK_RW_OP_RET(read, _trylock, lock))
+#define read_trylock(lock) \
+ __cond_lock(lock, PICK_RW_OP_RET(__read_trylock, _read_trylock, lock))
-//#define write_trylock(lock) _write_trylock(lock)
-#define write_trylock(lock) __cond_lock(lock, PICK_RW_OP_RET(write, _trylock, lock))
+#define write_trylock(lock) \
+ __cond_lock(lock, PICK_RW_OP_RET(__write_trylock, _write_trylock, lock))
#define write_trylock_irqsave(lock, flags) \
- __cond_lock(lock, PICK_RW_OP2_RET(write, _trylock_irqsave, lock, &flags))
+ __cond_lock(lock, PICK_RW_OP_RET(__write_trylock_irqsave, \
+ _write_trylock_irqsave, lock, &flags))
#define __spin_can_lock(lock) __raw_spin_can_lock(&(lock)->raw_lock)
#define __read_can_lock(lock) __raw_read_can_lock(&(lock)->raw_lock)
#define __write_can_lock(lock) __raw_write_can_lock(&(lock)->raw_lock)
#define spin_can_lock(lock) \
- __cond_lock(lock, PICK_OP_RET(_can_lock, lock))
+ __cond_lock(lock, PICK_SPIN_OP_RET(__spin_can_lock, _spin_can_lock,\
+ lock))
#define read_can_lock(lock) \
- __cond_lock(lock, PICK_RW_OP_RET(read, _can_lock, lock))
+ __cond_lock(lock, PICK_RW_OP_RET(__read_can_lock, _read_can_lock, lock))
#define write_can_lock(lock) \
- __cond_lock(lock, PICK_RW_OP_RET(write, _can_lock, lock))
+ __cond_lock(lock, PICK_RW_OP_RET(__write_can_lock, _write_can_lock,\
+ lock))
-// #define spin_lock(lock) _spin_lock(lock)
-#define spin_lock(lock) PICK_OP(_lock, lock)
+#define spin_lock(lock) PICK_SPIN_OP(__spin_lock, _spin_lock, lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) PICK_OP2(_lock_nested, lock, subclass)
+# define spin_lock_nested(lock, subclass) \
+ PICK_SPIN_OP(__spin_lock_nested, _spin_lock_nested, lock, subclass)
#else
# define spin_lock_nested(lock, subclass) spin_lock(lock)
#endif
-//#define write_lock(lock) _write_lock(lock)
-#define write_lock(lock) PICK_RW_OP(write, _lock, lock)
+#define write_lock(lock) PICK_RW_OP(__write_lock, _write_lock, lock)
-// #define read_lock(lock) _read_lock(lock)
-#define read_lock(lock) PICK_RW_OP(read, _lock, lock)
+#define read_lock(lock) PICK_RW_OP(__read_lock, _read_lock, lock)
# define spin_lock_irqsave(lock, flags) \
do { \
BUILD_CHECK_IRQ_FLAGS(flags); \
- flags = PICK_OP_RET(_lock_irqsave, lock); \
+ flags = PICK_SPIN_OP_RET(__spin_lock_irqsave, _spin_lock_irqsave, \
+ lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
BUILD_CHECK_IRQ_FLAGS(flags); \
- flags = PICK_OP2_RET(_lock_irqsave_nested, lock, subclass); \
+ flags = PICK_SPIN_OP_RET(__spin_lock_irqsave_nested, \
+ _spin_lock_irqsave_nested, lock, subclass); \
} while (0)
#else
# define spin_lock_irqsave_nested(lock, flags, subclass) \
@@ -541,112 +430,92 @@ do { \
# define read_lock_irqsave(lock, flags) \
do { \
BUILD_CHECK_IRQ_FLAGS(flags); \
- flags = PICK_RW_OP_RET(read, _lock_irqsave, lock); \
+ flags = PICK_RW_OP_RET(__read_lock_irqsave, _read_lock_irqsave, lock);\
} while (0)
# define write_lock_irqsave(lock, flags) \
do { \
BUILD_CHECK_IRQ_FLAGS(flags); \
- flags = PICK_RW_OP_RET(write, _lock_irqsave, lock); \
+ flags = PICK_RW_OP_RET(__write_lock_irqsave, _write_lock_irqsave,lock);\
} while (0)
-// #define spin_lock_irq(lock) _spin_lock_irq(lock)
-// #define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define spin_lock_irq(lock) PICK_OP(_lock_irq, lock)
-#define spin_lock_bh(lock) PICK_OP(_lock_bh, lock)
-
-// #define read_lock_irq(lock) _read_lock_irq(lock)
-// #define read_lock_bh(lock) _read_lock_bh(lock)
-#define read_lock_irq(lock) PICK_RW_OP(read, _lock_irq, lock)
-#define read_lock_bh(lock) PICK_RW_OP(read, _lock_bh, lock)
-
-// #define write_lock_irq(lock) _write_lock_irq(lock)
-// #define write_lock_bh(lock) _write_lock_bh(lock)
-#define write_lock_irq(lock) PICK_RW_OP(write, _lock_irq, lock)
-#define write_lock_bh(lock) PICK_RW_OP(write, _lock_bh, lock)
-
-// #define spin_unlock(lock) _spin_unlock(lock)
-// #define write_unlock(lock) _write_unlock(lock)
-// #define read_unlock(lock) _read_unlock(lock)
-#define spin_unlock(lock) PICK_OP(_unlock, lock)
-#define read_unlock(lock) PICK_RW_OP(read, _unlock, lock)
-#define write_unlock(lock) PICK_RW_OP(write, _unlock, lock)
+#define spin_lock_irq(lock) PICK_SPIN_OP(__spin_lock_irq, _spin_lock_irq, lock)
-// #define spin_unlock(lock) _spin_unlock_no_resched(lock)
-#define spin_unlock_no_resched(lock) \
- PICK_OP(_unlock_no_resched, lock)
+#define spin_lock_bh(lock) PICK_SPIN_OP(__spin_lock_bh, _spin_lock_bh, lock)
-//#define spin_unlock_irqrestore(lock, flags)
-// _spin_unlock_irqrestore(lock, flags)
-//#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-//#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
-#define spin_unlock_irqrestore(lock, flags) \
-do { \
- BUILD_CHECK_IRQ_FLAGS(flags); \
- PICK_OP2(_unlock_irqrestore, lock, flags); \
-} while (0)
+#define read_lock_irq(lock) PICK_RW_OP(__read_lock_irq, _read_lock_irq, lock)
-#define spin_unlock_irq(lock) PICK_OP(_unlock_irq, lock)
-#define spin_unlock_bh(lock) PICK_OP(_unlock_bh, lock)
+#define read_lock_bh(lock) PICK_RW_OP(__read_lock_bh, _read_lock_bh, lock)
-// #define read_unlock_irqrestore(lock, flags)
-// _read_unlock_irqrestore(lock, flags)
-// #define read_unlock_irq(lock) _read_unlock_irq(lock)
-// #define read_unlock_bh(lock) _read_unlock_bh(lock)
-#define read_unlock_irqrestore(lock, flags) \
-do { \
- BUILD_CHECK_IRQ_FLAGS(flags); \
- PICK_RW_OP2(read, _unlock_irqrestore, lock, flags); \
+#define write_lock_irq(lock) PICK_RW_OP(__write_lock_irq, _write_lock_irq, lock)
+
+#define write_lock_bh(lock) PICK_RW_OP(__write_lock_bh, _write_lock_bh, lock)
+
+#define spin_unlock(lock) PICK_SPIN_OP(__spin_unlock, _spin_unlock, lock)
+
+#define read_unlock(lock) PICK_RW_OP(__read_unlock, _read_unlock, lock)
+
+#define write_unlock(lock) PICK_RW_OP(__write_unlock, _write_unlock, lock)
+
+#define spin_unlock_no_resched(lock) \
+ PICK_SPIN_OP(__spin_unlock_no_resched, _spin_unlock_no_resched, lock)
+
+#define spin_unlock_irqrestore(lock, flags) \
+do { \
+ BUILD_CHECK_IRQ_FLAGS(flags); \
+ PICK_SPIN_OP(__spin_unlock_irqrestore, _spin_unlock_irqrestore, \
+ lock, flags); \
} while (0)
-#define read_unlock_irq(lock) PICK_RW_OP(read, _unlock_irq, lock)
-#define read_unlock_bh(lock) PICK_RW_OP(read, _unlock_bh, lock)
+#define spin_unlock_irq(lock) \
+ PICK_SPIN_OP(__spin_unlock_irq, _spin_unlock_irq, lock)
+#define spin_unlock_bh(lock) \
+ PICK_SPIN_OP(__spin_unlock_bh, _spin_unlock_bh, lock)
-// #define write_unlock_irqrestore(lock, flags)
-// _write_unlock_irqrestore(lock, flags)
-// #define write_unlock_irq(lock) _write_unlock_irq(lock)
-// #define write_unlock_bh(lock) _write_unlock_bh(lock)
-#define write_unlock_irqrestore(lock, flags) \
-do { \
- BUILD_CHECK_IRQ_FLAGS(flags); \
- PICK_RW_OP2(write, _unlock_irqrestore, lock, flags); \
+#define read_unlock_irqrestore(lock, flags) \
+do { \
+ BUILD_CHECK_IRQ_FLAGS(flags); \
+ PICK_RW_OP(__read_unlock_irqrestore, _read_unlock_irqrestore, \
+ lock, flags); \
} while (0)
-#define write_unlock_irq(lock) PICK_RW_OP(write, _unlock_irq, lock)
-#define write_unlock_bh(lock) PICK_RW_OP(write, _unlock_bh, lock)
-// #define spin_trylock_bh(lock) _spin_trylock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, PICK_OP_RET(_trylock_bh, lock))
+#define read_unlock_irq(lock) \
+ PICK_RW_OP(__read_unlock_irq, _read_unlock_irq, lock)
+#define read_unlock_bh(lock) PICK_RW_OP(__read_unlock_bh, _read_unlock_bh, lock)
-// #define spin_trylock_irq(lock)
+#define write_unlock_irqrestore(lock, flags) \
+do { \
+ BUILD_CHECK_IRQ_FLAGS(flags); \
+ PICK_RW_OP(__write_unlock_irqrestore, _write_unlock_irqrestore, \
+ lock, flags); \
+} while (0)
+#define write_unlock_irq(lock) \
+ PICK_RW_OP(__write_unlock_irq, _write_unlock_irq, lock)
-#define spin_trylock_irq(lock) __cond_lock(lock, PICK_OP_RET(_trylock_irq, lock))
+#define write_unlock_bh(lock) \
+ PICK_RW_OP(__write_unlock_bh, _write_unlock_bh, lock)
-// #define spin_trylock_irqsave(lock, flags)
+#define spin_trylock_bh(lock) \
+ __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_bh, _spin_trylock_bh,\
+ lock))
+
+#define spin_trylock_irq(lock) \
+ __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irq, \
+ __spin_trylock_irq, lock))
#define spin_trylock_irqsave(lock, flags) \
- __cond_lock(lock, PICK_OP2_RET(_trylock_irqsave, lock, &flags))
+ __cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irqsave, \
+ _spin_trylock_irqsave, lock, &flags))
/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
# include <asm/atomic.h>
- extern int __atomic_dec_and_spin_lock(atomic_t *atomic, raw_spinlock_t *lock);
+ extern int __atomic_dec_and_spin_lock(raw_spinlock_t *lock, atomic_t *atomic);
#endif
#define atomic_dec_and_lock(atomic, lock) \
-__cond_lock(lock, ({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL(lock, raw_spinlock_t)) \
- __ret = __atomic_dec_and_spin_lock(atomic, \
- (raw_spinlock_t *)(lock)); \
- else if (TYPE_EQUAL(lock, spinlock_t)) \
- __ret = _atomic_dec_and_spin_lock(atomic, \
- (spinlock_t *)(lock)); \
- else __ret = __bad_spinlock_type(); \
- \
- __ret; \
-}))
-
+ __cond_lock(lock, PICK_SPIN_OP_RET(__atomic_dec_and_spin_lock, \
+ _atomic_dec_and_spin_lock, lock, atomic))
/*
* bit-based spin_lock()
Index: linux-2.6.22/kernel/rtmutex.c
===================================================================
--- linux-2.6.22.orig/kernel/rtmutex.c
+++ linux-2.6.22/kernel/rtmutex.c
@@ -857,7 +857,7 @@ int __lockfunc rt_spin_trylock_irqsave(s
}
EXPORT_SYMBOL(rt_spin_trylock_irqsave);
-int _atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
+int _atomic_dec_and_spin_lock(spinlock_t *lock, atomic_t *atomic)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
Index: linux-2.6.22/lib/dec_and_lock.c
===================================================================
--- linux-2.6.22.orig/lib/dec_and_lock.c
+++ linux-2.6.22/lib/dec_and_lock.c
@@ -17,7 +17,7 @@
* because the spin-lock and the decrement must be
* "atomic".
*/
-int __atomic_dec_and_spin_lock(atomic_t *atomic, raw_spinlock_t *lock)
+int __atomic_dec_and_spin_lock(raw_spinlock_t *lock, atomic_t *atomic)
{
#ifdef CONFIG_SMP
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 3/8] seqlocks: use PICK_FUNCTION
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 2/8] spinlocks/rwlocks: use PICK_FUNCTION() Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 4/8] fork: desched_thread comment rework Daniel Walker
` (5 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users
[-- Attachment #1: pickop-seqlocks.patch --]
[-- Type: text/plain, Size: 9094 bytes --]
Replace the old PICK_OP style macros with PICK_FUNCTION. Although
the seqlock code has some alien constructs, which I also replaced, as can be seen
from the line count below.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
include/linux/pickop.h | 4
include/linux/seqlock.h | 235 +++++++++++++++++++++++++++---------------------
2 files changed, 135 insertions(+), 104 deletions(-)
Index: linux-2.6.22/include/linux/pickop.h
===================================================================
--- linux-2.6.22.orig/include/linux/pickop.h
+++ linux-2.6.22/include/linux/pickop.h
@@ -1,10 +1,6 @@
#ifndef _LINUX_PICKOP_H
#define _LINUX_PICKOP_H
-#undef TYPE_EQUAL
-#define TYPE_EQUAL(var, type) \
- __builtin_types_compatible_p(typeof(var), type *)
-
#undef PICK_TYPE_EQUAL
#define PICK_TYPE_EQUAL(var, type) \
__builtin_types_compatible_p(typeof(var), type)
Index: linux-2.6.22/include/linux/seqlock.h
===================================================================
--- linux-2.6.22.orig/include/linux/seqlock.h
+++ linux-2.6.22/include/linux/seqlock.h
@@ -90,6 +90,12 @@ static inline void __write_seqlock(seqlo
smp_wmb();
}
+static __always_inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+ __write_seqlock(sl);
+ return 0;
+}
+
static inline void __write_sequnlock(seqlock_t *sl)
{
smp_wmb();
@@ -97,6 +103,8 @@ static inline void __write_sequnlock(seq
spin_unlock(&sl->lock);
}
+#define __write_sequnlock_irqrestore(sl, flags) __write_sequnlock(sl)
+
static inline int __write_tryseqlock(seqlock_t *sl)
{
int ret = spin_trylock(&sl->lock);
@@ -149,6 +157,28 @@ static __always_inline void __write_seql
smp_wmb();
}
+static __always_inline unsigned long
+__write_seqlock_irqsave_raw(raw_seqlock_t *sl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __write_seqlock_raw(sl);
+ return flags;
+}
+
+static __always_inline void __write_seqlock_irq_raw(raw_seqlock_t *sl)
+{
+ local_irq_disable();
+ __write_seqlock_raw(sl);
+}
+
+static __always_inline void __write_seqlock_bh_raw(raw_seqlock_t *sl)
+{
+ local_bh_disable();
+ __write_seqlock_raw(sl);
+}
+
static __always_inline void __write_sequnlock_raw(raw_seqlock_t *sl)
{
smp_wmb();
@@ -156,6 +186,27 @@ static __always_inline void __write_sequ
spin_unlock(&sl->lock);
}
+static __always_inline void
+__write_sequnlock_irqrestore_raw(raw_seqlock_t *sl, unsigned long flags)
+{
+ __write_sequnlock_raw(sl);
+ local_irq_restore(flags);
+ preempt_check_resched();
+}
+
+static __always_inline void __write_sequnlock_irq_raw(raw_seqlock_t *sl)
+{
+ __write_sequnlock_raw(sl);
+ local_irq_enable();
+ preempt_check_resched();
+}
+
+static __always_inline void __write_sequnlock_bh_raw(raw_seqlock_t *sl)
+{
+ __write_sequnlock_raw(sl);
+ local_bh_enable();
+}
+
static __always_inline int __write_tryseqlock_raw(raw_seqlock_t *sl)
{
int ret = spin_trylock(&sl->lock);
@@ -182,60 +233,93 @@ static __always_inline int __read_seqret
extern int __bad_seqlock_type(void);
-#define PICK_SEQOP(op, lock) \
+/*
+ * PICK_SEQ_OP() is a small redirector to allow less typing of the lock
+ * types raw_seqlock_t, seqlock_t, at the front of the PICK_FUNCTION
+ * macro.
+ */
+#define PICK_SEQ_OP(...) \
+ PICK_FUNCTION(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__)
+#define PICK_SEQ_OP_RET(...) \
+ PICK_FUNCTION_RET(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__)
+
+#define write_seqlock(sl) PICK_SEQ_OP(__write_seqlock_raw, __write_seqlock, sl)
+
+#define write_sequnlock(sl) \
+ PICK_SEQ_OP(__write_sequnlock_raw, __write_sequnlock, sl)
+
+#define write_tryseqlock(sl) \
+ PICK_SEQ_OP_RET(__write_tryseqlock_raw, __write_tryseqlock, sl)
+
+#define read_seqbegin(sl) \
+ PICK_SEQ_OP_RET(__read_seqbegin_raw, __read_seqbegin, sl)
+
+#define read_seqretry(sl, iv) \
+ PICK_SEQ_OP_RET(__read_seqretry_raw, __read_seqretry, sl, iv)
+
+#define write_seqlock_irqsave(lock, flags) \
do { \
- if (TYPE_EQUAL((lock), raw_seqlock_t)) \
- op##_raw((raw_seqlock_t *)(lock)); \
- else if (TYPE_EQUAL((lock), seqlock_t)) \
- op((seqlock_t *)(lock)); \
- else __bad_seqlock_type(); \
+ flags = PICK_SEQ_OP_RET(__write_seqlock_irqsave_raw, \
+ __write_seqlock_irqsave, lock); \
} while (0)
-#define PICK_SEQOP_RET(op, lock) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_seqlock_t)) \
- __ret = op##_raw((raw_seqlock_t *)(lock)); \
- else if (TYPE_EQUAL((lock), seqlock_t)) \
- __ret = op((seqlock_t *)(lock)); \
- else __ret = __bad_seqlock_type(); \
- \
- __ret; \
-})
-
-#define PICK_SEQOP_CONST_RET(op, lock) \
-({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_seqlock_t)) \
- __ret = op##_raw((const raw_seqlock_t *)(lock));\
- else if (TYPE_EQUAL((lock), seqlock_t)) \
- __ret = op((seqlock_t *)(lock)); \
- else __ret = __bad_seqlock_type(); \
- \
- __ret; \
-})
-
-#define PICK_SEQOP2_CONST_RET(op, lock, arg) \
- ({ \
- unsigned long __ret; \
- \
- if (TYPE_EQUAL((lock), raw_seqlock_t)) \
- __ret = op##_raw((const raw_seqlock_t *)(lock), (arg)); \
- else if (TYPE_EQUAL((lock), seqlock_t)) \
- __ret = op((seqlock_t *)(lock), (arg)); \
- else __ret = __bad_seqlock_type(); \
- \
- __ret; \
-})
-
-
-#define write_seqlock(sl) PICK_SEQOP(__write_seqlock, sl)
-#define write_sequnlock(sl) PICK_SEQOP(__write_sequnlock, sl)
-#define write_tryseqlock(sl) PICK_SEQOP_RET(__write_tryseqlock, sl)
-#define read_seqbegin(sl) PICK_SEQOP_CONST_RET(__read_seqbegin, sl)
-#define read_seqretry(sl, iv) PICK_SEQOP2_CONST_RET(__read_seqretry, sl, iv)
+#define write_seqlock_irq(lock) \
+ PICK_SEQ_OP(__write_seqlock_irq_raw, __write_seqlock, lock)
+
+#define write_seqlock_bh(lock) \
+ PICK_SEQ_OP(__write_seqlock_bh_raw, __write_seqlock, lock)
+
+#define write_sequnlock_irqrestore(lock, flags) \
+ PICK_SEQ_OP(__write_sequnlock_irqrestore_raw, \
+ __write_sequnlock_irqrestore, lock, flags)
+
+#define write_sequnlock_bh(lock) \
+ PICK_SEQ_OP(__write_sequnlock_bh_raw, __write_sequnlock, lock)
+
+#define write_sequnlock_irq(lock) \
+ PICK_SEQ_OP(__write_sequnlock_irq_raw, __write_sequnlock, lock)
+
+static __always_inline
+unsigned long __read_seqbegin_irqsave_raw(raw_seqlock_t *sl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __read_seqbegin_raw(sl);
+ return flags;
+}
+
+static __always_inline unsigned long __read_seqbegin_irqsave(seqlock_t *sl)
+{
+ __read_seqbegin(sl);
+ return 0;
+}
+
+#define read_seqbegin_irqsave(lock, flags) \
+do { \
+ flags = PICK_SEQ_OP_RET(__read_seqbegin_irqsave_raw, \
+ __read_seqbegin_irqsave, lock); \
+} while (0)
+
+static __always_inline int
+__read_seqretry_irqrestore(seqlock_t *sl, unsigned iv, unsigned long flags)
+{
+ return __read_seqretry(sl, iv);
+}
+
+static __always_inline int
+__read_seqretry_irqrestore_raw(raw_seqlock_t *sl, unsigned iv,
+ unsigned long flags)
+{
+ int ret = read_seqretry(sl, iv);
+ local_irq_restore(flags);
+ preempt_check_resched();
+ return ret;
+}
+
+#define read_seqretry_irqrestore(lock, iv, flags) \
+ PICK_SEQ_OP_RET(__read_seqretry_irqrestore_raw, \
+ __read_seqretry_irqrestore, lock, iv, flags)
/*
* Version using sequence counter only.
@@ -286,53 +370,4 @@ static inline void write_seqcount_end(se
smp_wmb();
s->sequence++;
}
-
-#define PICK_IRQOP(op, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_seqlock_t)) \
- op(); \
- else if (TYPE_EQUAL((lock), seqlock_t)) \
- { /* nothing */ } \
- else __bad_seqlock_type(); \
-} while (0)
-
-#define PICK_IRQOP2(op, arg, lock) \
-do { \
- if (TYPE_EQUAL((lock), raw_seqlock_t)) \
- op(arg); \
- else if (TYPE_EQUAL(lock, seqlock_t)) \
- { /* nothing */ } \
- else __bad_seqlock_type(); \
-} while (0)
-
-
-
-/*
- * Possible sw/hw IRQ protected versions of the interfaces.
- */
-#define write_seqlock_irqsave(lock, flags) \
- do { PICK_IRQOP2(local_irq_save, flags, lock); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock) \
- do { PICK_IRQOP(local_irq_disable, lock); write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock) \
- do { PICK_IRQOP(local_bh_disable, lock); write_seqlock(lock); } while (0)
-
-#define write_sequnlock_irqrestore(lock, flags) \
- do { write_sequnlock(lock); PICK_IRQOP2(local_irq_restore, flags, lock); preempt_check_resched(); } while(0)
-#define write_sequnlock_irq(lock) \
- do { write_sequnlock(lock); PICK_IRQOP(local_irq_enable, lock); preempt_check_resched(); } while(0)
-#define write_sequnlock_bh(lock) \
- do { write_sequnlock(lock); PICK_IRQOP(local_bh_enable, lock); } while(0)
-
-#define read_seqbegin_irqsave(lock, flags) \
- ({ PICK_IRQOP2(local_irq_save, flags, lock); read_seqbegin(lock); })
-
-#define read_seqretry_irqrestore(lock, iv, flags) \
- ({ \
- int ret = read_seqretry(lock, iv); \
- PICK_IRQOP2(local_irq_restore, flags, lock); \
- preempt_check_resched(); \
- ret; \
- })
-
#endif /* __LINUX_SEQLOCK_H */
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 4/8] fork: desched_thread comment rework.
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 2/8] spinlocks/rwlocks: use PICK_FUNCTION() Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 3/8] seqlocks: use PICK_FUNCTION Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 5/8] latency tracing: use now() consistently Daniel Walker
` (4 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users
[-- Attachment #1: fix-ia64-comment-in-desched-thread.patch --]
[-- Type: text/plain, Size: 807 bytes --]
Lines are too long.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
kernel/fork.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
Index: linux-2.6.22/kernel/fork.c
===================================================================
--- linux-2.6.22.orig/kernel/fork.c
+++ linux-2.6.22/kernel/fork.c
@@ -1787,8 +1787,10 @@ static int desched_thread(void * __bind_
continue;
schedule();
- /* This must be called from time to time on ia64, and is a no-op on other archs.
- * Used to be in cpu_idle(), but with the new -rt semantics it can't stay there.
+ /*
+ * This must be called from time to time on ia64, and is a
+ * no-op on other archs. Used to be in cpu_idle(), but with
+ * the new -rt semantics it can't stay there.
*/
check_pgt_cache();
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 5/8] latency tracing: use now() consistently
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
` (2 preceding siblings ...)
2007-08-28 21:37 ` [PATCH -rt 4/8] fork: desched_thread comment rework Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 6/8] preempt_max_latency in all modes Daniel Walker
` (3 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users
[-- Attachment #1: use-now-consistently-in-latency-tracing.patch --]
[-- Type: text/plain, Size: 2847 bytes --]
Just get_monotonic_cycles() switched to now() ..
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
kernel/latency_trace.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
Index: linux-2.6.22/kernel/latency_trace.c
===================================================================
--- linux-2.6.22.orig/kernel/latency_trace.c
+++ linux-2.6.22/kernel/latency_trace.c
@@ -1751,7 +1751,7 @@ check_critical_timing(int cpu, struct cp
* as long as possible:
*/
T0 = tr->preempt_timestamp;
- T1 = get_monotonic_cycles();
+ T1 = now();
delta = T1-T0;
local_save_flags(flags);
@@ -1765,7 +1765,7 @@ check_critical_timing(int cpu, struct cp
* might change it (it can only get larger so the latency
* is fair to be reported):
*/
- T2 = get_monotonic_cycles();
+ T2 = now();
delta = T2-T0;
@@ -1815,7 +1815,7 @@ check_critical_timing(int cpu, struct cp
printk(" => ended at timestamp %lu: ", t1);
print_symbol("<%s>\n", tr->critical_end);
dump_stack();
- t1 = cycles_to_usecs(get_monotonic_cycles());
+ t1 = cycles_to_usecs(now());
printk(" => dump-end timestamp %lu\n\n", t1);
#endif
@@ -1825,7 +1825,7 @@ check_critical_timing(int cpu, struct cp
out:
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->early_warning = 0;
reset_trace_idx(cpu, tr);
_trace_cmdline(cpu, tr);
@@ -1874,7 +1874,7 @@ __start_critical_timing(unsigned long ei
atomic_inc(&tr->disabled);
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->critical_start = eip;
reset_trace_idx(cpu, tr);
tr->latency_type = latency_type;
@@ -2196,7 +2196,7 @@ check_wakeup_timing(struct cpu_trace *tr
goto out;
T0 = tr->preempt_timestamp;
- T1 = get_monotonic_cycles();
+ T1 = now();
/*
* Any wraparound or time warp and we are out:
*/
@@ -2314,7 +2314,7 @@ void __trace_start_sched_wakeup(struct t
// if (!atomic_read(&tr->disabled)) {
atomic_inc(&tr->disabled);
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->latency_type = WAKEUP_LATENCY;
tr->critical_start = CALLER_ADDR0;
_trace_cmdline(raw_smp_processor_id(), tr);
@@ -2426,7 +2426,7 @@ long user_trace_start(void)
atomic_inc(&tr->disabled);
tr->critical_sequence = max_sequence;
- tr->preempt_timestamp = get_monotonic_cycles();
+ tr->preempt_timestamp = now();
tr->critical_start = CALLER_ADDR0;
_trace_cmdline(cpu, tr);
atomic_dec(&tr->disabled);
@@ -2486,7 +2486,7 @@ long user_trace_stop(void)
unsigned long long tmp0;
T0 = tr->preempt_timestamp;
- T1 = get_monotonic_cycles();
+ T1 = now();
tmp0 = preempt_max_latency;
if (T1 < T0)
T0 = T1;
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 6/8] preempt_max_latency in all modes
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
` (3 preceding siblings ...)
2007-08-28 21:37 ` [PATCH -rt 5/8] latency tracing: use now() consistently Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 7/8] latency hist: add resetting for all timing options Daniel Walker
` (2 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users
[-- Attachment #1: add-preempt-max-latency-for-all-timing-modes.patch --]
[-- Type: text/plain, Size: 761 bytes --]
This enables the /proc/preempt_max_latency facility for timing modes,
even if event tracing is disabled. Wakeup latency was the only one
that had this feature in the past.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
kernel/sysctl.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
Index: linux-2.6.22/kernel/sysctl.c
===================================================================
--- linux-2.6.22.orig/kernel/sysctl.c
+++ linux-2.6.22/kernel/sysctl.c
@@ -392,7 +392,7 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#if defined(CONFIG_WAKEUP_TIMING) || defined(CONFIG_EVENT_TRACE)
+#if defined(CONFIG_CRITICAL_TIMING)
{
.ctl_name = CTL_UNNUMBERED,
.procname = "preempt_max_latency",
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 7/8] latency hist: add resetting for all timing options
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
` (4 preceding siblings ...)
2007-08-28 21:37 ` [PATCH -rt 6/8] preempt_max_latency in all modes Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 8/8] stop critical timing in idle Daniel Walker
2007-08-28 23:44 ` [PATCH -rt 1/8] introduce PICK_FUNCTION Nick Piggin
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users, Carsten Emde
[-- Attachment #1: latency_tracing_histogram_reset.patch --]
[-- Type: text/plain, Size: 7675 bytes --]
I dropped parts of the prior reset method, and added a file called
"reset" into the /proc/latency_hist/ timing directories. It allows
any of the timing options to get their histograms reset.
I also fixed a couple of oddities in the code. Instead of creating a
file for all NR_CPUS, I just used num_possible_cpus(). I also dropped
a string which only holds "CPU" and just inserted it where it was used.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
include/linux/latency_hist.h | 1
kernel/latency_hist.c | 119 ++++++++++++++++++++++++++++---------------
kernel/latency_trace.c | 13 ----
3 files changed, 80 insertions(+), 53 deletions(-)
Index: linux-2.6.22/include/linux/latency_hist.h
===================================================================
--- linux-2.6.22.orig/include/linux/latency_hist.h
+++ linux-2.6.22/include/linux/latency_hist.h
@@ -23,7 +23,6 @@ enum {
#ifdef CONFIG_LATENCY_HIST
extern void latency_hist(int latency_type, int cpu, unsigned long latency);
-extern void latency_hist_reset(void);
# define latency_hist_flag 1
#else
# define latency_hist(a,b,c) do { (void)(cpu); } while (0)
Index: linux-2.6.22/kernel/latency_hist.c
===================================================================
--- linux-2.6.22.orig/kernel/latency_hist.c
+++ linux-2.6.22/kernel/latency_hist.c
@@ -16,6 +16,7 @@
#include <linux/latency_hist.h>
#include <asm/atomic.h>
#include <asm/div64.h>
+#include <asm/uaccess.h>
typedef struct hist_data_struct {
atomic_t hist_mode; /* 0 log, 1 don't log */
@@ -31,8 +32,6 @@ typedef struct hist_data_struct {
static struct proc_dir_entry * latency_hist_root = NULL;
static char * latency_hist_proc_dir_root = "latency_hist";
-static char * percpu_proc_name = "CPU";
-
#ifdef CONFIG_INTERRUPT_OFF_HIST
static DEFINE_PER_CPU(hist_data_t, interrupt_off_hist);
static char * interrupt_off_hist_proc_dir = "interrupt_off_latency";
@@ -56,7 +55,7 @@ static inline u64 u64_div(u64 x, u64 y)
return x;
}
-void latency_hist(int latency_type, int cpu, unsigned long latency)
+void notrace latency_hist(int latency_type, int cpu, unsigned long latency)
{
hist_data_t * my_hist;
@@ -205,6 +204,69 @@ static struct file_operations latency_hi
.release = seq_release,
};
+static void hist_reset(hist_data_t *hist)
+{
+ atomic_dec(&hist->hist_mode);
+
+ memset(hist->hist_array, 0, sizeof(hist->hist_array));
+ hist->beyond_hist_bound_samples = 0UL;
+ hist->min_lat = 0xFFFFFFFFUL;
+ hist->max_lat = 0UL;
+ hist->total_samples = 0UL;
+ hist->accumulate_lat = 0UL;
+ hist->avg_lat = 0UL;
+
+ atomic_inc(&hist->hist_mode);
+}
+
+ssize_t latency_hist_reset(struct file *file, const char __user *a, size_t size, loff_t *off)
+{
+ int cpu;
+ hist_data_t *hist;
+ struct proc_dir_entry *entry_ptr = PDE(file->f_dentry->d_inode);
+ int latency_type = (int)entry_ptr->data;
+
+ switch (latency_type) {
+
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ case WAKEUP_LATENCY:
+ for_each_online_cpu(cpu) {
+ hist = &per_cpu(wakeup_latency_hist, cpu);
+ hist_reset(hist);
+ }
+ break;
+#endif
+
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ case PREEMPT_LATENCY:
+ for_each_online_cpu(cpu) {
+ hist = &per_cpu(preempt_off_hist, cpu);
+ hist_reset(hist);
+ }
+ break;
+#endif
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ case INTERRUPT_LATENCY:
+ for_each_online_cpu(cpu) {
+ hist = &per_cpu(interrupt_off_hist, cpu);
+ hist_reset(hist);
+ }
+ break;
+#endif
+ }
+
+ return size;
+}
+
+static struct file_operations latency_hist_reset_seq_fops = {
+ .write = latency_hist_reset,
+};
+
+static struct proc_dir_entry *interrupt_off_reset;
+static struct proc_dir_entry *preempt_off_reset;
+static struct proc_dir_entry *wakeup_latency_reset;
+
static __init int latency_hist_init(void)
{
struct proc_dir_entry *tmp_parent_proc_dir;
@@ -214,11 +276,10 @@ static __init int latency_hist_init(void
latency_hist_root = proc_mkdir(latency_hist_proc_dir_root, NULL);
-
#ifdef CONFIG_INTERRUPT_OFF_HIST
tmp_parent_proc_dir = proc_mkdir(interrupt_off_hist_proc_dir, latency_hist_root);
- for (i = 0; i < NR_CPUS; i++) {
- len = sprintf(procname, "%s%d", percpu_proc_name, i);
+ for (i = 0; i < num_possible_cpus(); i++) {
+ len = sprintf(procname, "CPU%d", i);
procname[len] = '\0';
entry[INTERRUPT_LATENCY][i] =
create_proc_entry(procname, 0, tmp_parent_proc_dir);
@@ -228,12 +289,15 @@ static __init int latency_hist_init(void
atomic_set(&my_hist->hist_mode,1);
my_hist->min_lat = 0xFFFFFFFFUL;
}
+ interrupt_off_reset = create_proc_entry("reset", 0, tmp_parent_proc_dir);
+ interrupt_off_reset->data = INTERRUPT_LATENCY;
+ interrupt_off_reset->proc_fops = &latency_hist_reset_seq_fops;
#endif
#ifdef CONFIG_PREEMPT_OFF_HIST
tmp_parent_proc_dir = proc_mkdir(preempt_off_hist_proc_dir, latency_hist_root);
- for (i = 0; i < NR_CPUS; i++) {
- len = sprintf(procname, "%s%d", percpu_proc_name, i);
+ for (i = 0; i < num_possible_cpus(); i++) {
+ len = sprintf(procname, "CPU%d", i);
procname[len] = '\0';
entry[PREEMPT_LATENCY][i] =
create_proc_entry(procname, 0, tmp_parent_proc_dir);
@@ -243,12 +307,15 @@ static __init int latency_hist_init(void
atomic_set(&my_hist->hist_mode,1);
my_hist->min_lat = 0xFFFFFFFFUL;
}
+ preempt_off_reset = create_proc_entry("reset", 0, tmp_parent_proc_dir);
+ preempt_off_reset->data = PREEMPT_LATENCY;
+ preempt_off_reset->proc_fops = &latency_hist_reset_seq_fops;
#endif
#ifdef CONFIG_WAKEUP_LATENCY_HIST
tmp_parent_proc_dir = proc_mkdir(wakeup_latency_hist_proc_dir, latency_hist_root);
- for (i = 0; i < NR_CPUS; i++) {
- len = sprintf(procname, "%s%d", percpu_proc_name, i);
+ for (i = 0; i < num_possible_cpus(); i++) {
+ len = sprintf(procname, "CPU%d", i);
procname[len] = '\0';
entry[WAKEUP_LATENCY][i] =
create_proc_entry(procname, 0, tmp_parent_proc_dir);
@@ -258,38 +325,12 @@ static __init int latency_hist_init(void
atomic_set(&my_hist->hist_mode,1);
my_hist->min_lat = 0xFFFFFFFFUL;
}
+ wakeup_latency_reset = create_proc_entry("reset", 0, tmp_parent_proc_dir);
+ wakeup_latency_reset->data = WAKEUP_LATENCY;
+ wakeup_latency_reset->proc_fops = &latency_hist_reset_seq_fops;
#endif
return 0;
}
__initcall(latency_hist_init);
-
-
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
-static void hist_reset(hist_data_t *hist)
-{
- atomic_dec(&hist->hist_mode);
-
- memset(hist->hist_array, 0, sizeof(hist->hist_array));
- hist->beyond_hist_bound_samples = 0UL;
- hist->min_lat = 0xFFFFFFFFUL;
- hist->max_lat = 0UL;
- hist->total_samples = 0UL;
- hist->accumulate_lat = 0UL;
- hist->avg_lat = 0UL;
-
- atomic_inc(&hist->hist_mode);
-}
-
-void latency_hist_reset(void)
-{
- int cpu;
- hist_data_t *hist;
-
- for_each_online_cpu(cpu) {
- hist = &per_cpu(wakeup_latency_hist, cpu);
- hist_reset(hist);
- }
-}
-#endif
Index: linux-2.6.22/kernel/latency_trace.c
===================================================================
--- linux-2.6.22.orig/kernel/latency_trace.c
+++ linux-2.6.22/kernel/latency_trace.c
@@ -2207,19 +2207,6 @@ check_wakeup_timing(struct cpu_trace *tr
if (!report_latency(delta))
goto out;
-#ifdef CONFIG_WAKEUP_LATENCY_HIST
- /*
- * Was preempt_max_latency reset?
- * If so, we reinitialize the latency histograms to keep them in sync.
- *
- * FIXME: Remove the poll and write our own procfs handler, so
- * we can trigger on the write to preempt_max_latency
- */
- if (last_preempt_max_latency > 0 && preempt_max_latency == 0)
- latency_hist_reset();
- last_preempt_max_latency = preempt_max_latency;
-#endif
-
____trace(smp_processor_id(), TRACE_FN, tr, CALLER_ADDR0, parent_eip,
0, 0, 0, *flags);
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH -rt 8/8] stop critical timing in idle.
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
` (5 preceding siblings ...)
2007-08-28 21:37 ` [PATCH -rt 7/8] latency hist: add resetting for all timing options Daniel Walker
@ 2007-08-28 21:37 ` Daniel Walker
2007-08-28 23:44 ` [PATCH -rt 1/8] introduce PICK_FUNCTION Nick Piggin
7 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 21:37 UTC (permalink / raw)
To: mingo; +Cc: mingo, linux-kernel, linux-rt-users
[-- Attachment #1: fix-idle-latency-tracing.patch --]
[-- Type: text/plain, Size: 1656 bytes --]
Without this the idle routine still gets traced. This is done already
for ACPI idle, but it should also be done for other idle routines.
Signed-off-by: Daniel Walker <dwalker@mvista.com>
---
arch/i386/kernel/process.c | 9 +++++++++
arch/x86_64/kernel/process.c | 10 ++++++++++
2 files changed, 19 insertions(+)
Index: linux-2.6.22/arch/i386/kernel/process.c
===================================================================
--- linux-2.6.22.orig/arch/i386/kernel/process.c
+++ linux-2.6.22/arch/i386/kernel/process.c
@@ -197,8 +197,17 @@ void cpu_idle(void)
if (cpu_is_offline(cpu))
play_dead();
+ /*
+ * We have irqs disabled here, so stop latency tracing
+ * at this point and restart it after we return:
+ */
+ stop_critical_timing();
+
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
+
+ touch_critical_timing();
+
}
local_irq_disable();
trace_preempt_exit_idle();
Index: linux-2.6.22/arch/x86_64/kernel/process.c
===================================================================
--- linux-2.6.22.orig/arch/x86_64/kernel/process.c
+++ linux-2.6.22/arch/x86_64/kernel/process.c
@@ -223,8 +223,18 @@ void cpu_idle (void)
* Otherwise, idle callbacks can misfire.
*/
local_irq_disable();
+
+ /*
+ * We have irqs disabled here, so stop latency tracing
+ * at this point and restart it after we return:
+ */
+ stop_critical_timing();
+
enter_idle();
idle();
+
+ touch_critical_timing();
+
/* In many cases the interrupt that ended idle
has already called exit_idle. But some idle
loops can be woken up without interrupt. */
--
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH -rt 1/8] introduce PICK_FUNCTION
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
` (6 preceding siblings ...)
2007-08-28 21:37 ` [PATCH -rt 8/8] stop critical timing in idle Daniel Walker
@ 2007-08-28 23:44 ` Nick Piggin
2007-08-28 23:54 ` Daniel Walker
7 siblings, 1 reply; 10+ messages in thread
From: Nick Piggin @ 2007-08-28 23:44 UTC (permalink / raw)
To: Daniel Walker; +Cc: mingo, mingo, linux-kernel, linux-rt-users, Peter Zijlstra
Daniel Walker wrote:
> PICK_FUNCTION() is similar to the other PICK_OP style macros, and was
> created to replace them all. I used variable argument macros to handle
> PICK_FUNC_2ARG/PICK_FUNC_1ARG. Otherwise the marcos are similar to the
> original macros used for semaphores. The entire system is used to do a
> compile time switch between two different locking APIs. For example,
> real spinlocks (raw_spinlock_t) and mutexes (or sleeping spinlocks).
>
> This new macro replaces all the duplication from lock type to lock type.
> The result of this patch, and the next two, is a fairly nice simplification,
> and consolidation. Although the seqlock changes are larger than the originals
> I think over all the patchset is worth while.
>
> Incorporated peterz's suggestion to not require TYPE_EQUAL() to only
> use pointers.
How come this is cc'ed to lkml? Is it something that is relevant to
the mainline kernel... or?
--
SUSE Labs, Novell Inc.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH -rt 1/8] introduce PICK_FUNCTION
2007-08-28 23:44 ` [PATCH -rt 1/8] introduce PICK_FUNCTION Nick Piggin
@ 2007-08-28 23:54 ` Daniel Walker
0 siblings, 0 replies; 10+ messages in thread
From: Daniel Walker @ 2007-08-28 23:54 UTC (permalink / raw)
To: Nick Piggin; +Cc: mingo, mingo, linux-kernel, linux-rt-users, Peter Zijlstra
On Wed, 2007-08-29 at 09:44 +1000, Nick Piggin wrote:
> Daniel Walker wrote:
> > PICK_FUNCTION() is similar to the other PICK_OP style macros, and was
> > created to replace them all. I used variable argument macros to handle
> > PICK_FUNC_2ARG/PICK_FUNC_1ARG. Otherwise the marcos are similar to the
> > original macros used for semaphores. The entire system is used to do a
> > compile time switch between two different locking APIs. For example,
> > real spinlocks (raw_spinlock_t) and mutexes (or sleeping spinlocks).
> >
> > This new macro replaces all the duplication from lock type to lock type.
> > The result of this patch, and the next two, is a fairly nice simplification,
> > and consolidation. Although the seqlock changes are larger than the originals
> > I think over all the patchset is worth while.
> >
> > Incorporated peterz's suggestion to not require TYPE_EQUAL() to only
> > use pointers.
>
> How come this is cc'ed to lkml? Is it something that is relevant to
> the mainline kernel... or?
The real time changes are usually developed on lkml; that's how it's
been in the past. I personally like CC'ing lkml since real time can
sometimes touch lots of different subsystems, so it's good to have a
diverse set of people reviewing.
Daniel
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2007-08-29 0:03 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-08-28 21:37 [PATCH -rt 1/8] introduce PICK_FUNCTION Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 2/8] spinlocks/rwlocks: use PICK_FUNCTION() Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 3/8] seqlocks: use PICK_FUNCTION Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 4/8] fork: desched_thread comment rework Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 5/8] latency tracing: use now() consistently Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 6/8] preempt_max_latency in all modes Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 7/8] latency hist: add resetting for all timing options Daniel Walker
2007-08-28 21:37 ` [PATCH -rt 8/8] stop critical timing in idle Daniel Walker
2007-08-28 23:44 ` [PATCH -rt 1/8] introduce PICK_FUNCTION Nick Piggin
2007-08-28 23:54 ` Daniel Walker
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox