diff -urN linux-2.6.0-test11.lockmeter.orig/arch/ia64/lib/dec_and_lock.c linux-2.6.0-test11.lockmeter/arch/ia64/lib/dec_and_lock.c
--- linux-2.6.0-test11.lockmeter.orig/arch/ia64/lib/dec_and_lock.c	2003-12-11 10:44:10.000000000 +0100
+++ linux-2.6.0-test11.lockmeter/arch/ia64/lib/dec_and_lock.c	2003-12-11 10:43:47.000000000 +0100
@@ -13,6 +13,7 @@
 #include
 #include
 
+#ifndef ATOMIC_DEC_AND_LOCK
 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
 	int counter;
@@ -36,3 +37,4 @@
 	spin_unlock(lock);
 	return 0;
 }
+#endif
diff -urN linux-2.6.0-test11.lockmeter.orig/include/asm-ia64/spinlock.h linux-2.6.0-test11.lockmeter/include/asm-ia64/spinlock.h
--- linux-2.6.0-test11.lockmeter.orig/include/asm-ia64/spinlock.h	2003-12-11 10:09:42.000000000 +0100
+++ linux-2.6.0-test11.lockmeter/include/asm-ia64/spinlock.h	2003-12-11 10:15:45.000000000 +0100
@@ -247,13 +247,27 @@
 extern void _metered_spin_unlock(spinlock_t *lock);
 
 /*
- * Use a less efficient, and inline, atomic_dec_and_lock() if lockmetering
- * so we can see the callerPC of who is actually doing the spin_lock().
- * Otherwise, all we see is the generic rollup of all locks done by
- * atomic_dec_and_lock().
+ * Matches what is in arch/ia64/lib/dec_and_lock.c, except this one is
+ * "static inline" so that the spin_lock(), if actually invoked, is charged
+ * against the real caller, not against the catch-all atomic_dec_and_lock
  */
 static inline int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+	int counter;
+	int newcount;
+
+repeat:
+	counter = atomic_read(atomic);
+	newcount = counter-1;
+
+	if (!newcount)
+		goto slow_path;
+
+	if(cmpxchg(&atomic->counter, counter, newcount) != counter)
+		goto repeat;
+	return 0;
+
+slow_path:
 	_metered_spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
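
For reference, the inline function added above implements the usual dec-and-lock
fast path: decrement with cmpxchg() while the count stays above one, and fall
back to taking the lock only when the decrement might reach zero, so that (per
the patch's comment) the spin_lock() is charged to the real caller. Below is a
minimal user-space sketch of the same pattern. It is not part of the patch; it
assumes C11 <stdatomic.h> and a pthread mutex standing in for the kernel's
atomic_t, cmpxchg() and spinlock_t, and the name dec_and_lock() here is
illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

/* Returns true, with the lock held, iff the count dropped to zero. */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *mtx)
{
	int old = atomic_load(count);

	/* Fast path: decrement lock-free while the result stays nonzero. */
	while (old != 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;	/* decremented without taking the lock */
		/* CAS failed: old now holds the current value; retry. */
	}

	/* Slow path: the count may hit zero, so take the lock first. */
	pthread_mutex_lock(mtx);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;	/* now zero; caller unlocks after cleanup */
	pthread_mutex_unlock(mtx);
	return false;
}

int main(void)
{
	atomic_int refcount = 2;
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	printf("%d\n", dec_and_lock(&refcount, &lock));	/* 0: 2 -> 1, lock-free */
	printf("%d\n", dec_and_lock(&refcount, &lock));	/* 1: 1 -> 0, lock held */
	pthread_mutex_unlock(&lock);
	return 0;
}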