diff -urN linux-2.6.0-test11.lockmeter.orig/include/asm-ia64/spinlock.h linux-2.6.0-test11.lockmeter/include/asm-ia64/spinlock.h
--- linux-2.6.0-test11.lockmeter.orig/include/asm-ia64/spinlock.h	2003-12-09 13:03:36.000000000 +0100
+++ linux-2.6.0-test11.lockmeter/include/asm-ia64/spinlock.h	2003-12-09 13:08:24.000000000 +0100
@@ -247,13 +247,33 @@
 extern void _metered_spin_unlock(spinlock_t *lock);
 /*
- * Use a less efficient, and inline, atomic_dec_and_lock() if lockmetering
- * so we can see the callerPC of who is actually doing the spin_lock().
- * Otherwise, all we see is the generic rollup of all locks done by
- * atomic_dec_and_lock().
+ * Matches what is in arch/ia64/lib/dec_and_lock.c, except this one is
+ * "static inline" so that the spin_lock(), if actually invoked, is charged
+ * against the real caller, not against the catch-all atomic_dec_and_lock
  */
 static inline int
 atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+	int counter;
+	int newcount;
+
+repeat:
+	counter = atomic_read(atomic);
+	newcount = counter - 1;
+
+	if (!newcount)
+		goto slow_path;
+
+	asm volatile("mov ar.ccv=%1;;\n\t"
+		     "cmpxchg4.acq %0=%2,%3,ar.ccv;;"
+		     :"=r" (newcount)
+		     :"r" (counter), "m" (atomic->counter), "r" (newcount)
+		     :"ar.ccv");
+
+	if (newcount != counter)
+		goto repeat;
+	return 0;
+
+slow_path:
 	_metered_spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
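
For reference, the fast path inlined above follows the same dec-and-lock pattern as the generic lib/dec_and_lock.c: atomically decrement the counter while the result stays non-zero, and fall back to taking the lock only when the count is about to reach zero, so the metered spin_lock() is charged to the real caller. The following is a minimal user-space sketch of that pattern, not the patch itself; it assumes C11 atomics and a pthread mutex in place of the kernel's atomic_t and metered spinlock, and the function name dec_and_lock_sketch is illustrative only.

#include <stdatomic.h>
#include <pthread.h>

/*
 * Sketch of the dec-and-lock fast-path/slow-path split.
 * Returns 1 with the mutex held if the counter reached zero,
 * otherwise returns 0 without touching the mutex.
 */
static int dec_and_lock_sketch(atomic_int *counter, pthread_mutex_t *lock)
{
	int old = atomic_load(counter);

	/* Fast path: decrement lock-free while the result stays non-zero. */
	while (old != 1) {
		/* On failure, compare_exchange reloads 'old' with the current value. */
		if (atomic_compare_exchange_weak(counter, &old, old - 1))
			return 0;
	}

	/* Slow path: the count would hit zero, so take the lock first. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(counter, 1) == 1)
		return 1;	/* counter reached zero; caller holds the lock */
	pthread_mutex_unlock(lock);
	return 0;
}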