public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* optimised rw-semaphores for MIPS/MIPS64
@ 2001-07-11 14:00 David Howells
  2001-07-11 14:18 ` David Howells
  2001-07-11 16:41 ` Andrea Arcangeli
  0 siblings, 2 replies; 3+ messages in thread
From: David Howells @ 2001-07-11 14:00 UTC (permalink / raw)
  To: ralf; +Cc: linux-mips, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 441 bytes --]

Hello Ralf,

I've produced an inline-assembly optimised Read/Write Semaphore patch for MIPS
against linux-2.4.7-pre6 (rwsem.diff). David Woodhouse has tested it.

I've also included a patch to the MIPS64 arch to supply that with optimised
rwsems (rwsem64.diff), but that is untested.

Finally, I've included a patch to fix lib/rwsem.c (lib-rwsem.diff) if you want
to try the patch on MIPS64 against anything earlier than 2.4.7-pre6.

David


[-- Attachment #2: MIPS optimised rwsem patch --]
[-- Type: text/plain, Size: 5702 bytes --]

diff -uNr -x CVS -x TAGS linux-2.4-mips/arch/mips/config.in linux-mips-rwsem/arch/mips/config.in
--- linux-2.4-mips/arch/mips/config.in	Thu Jun 28 13:58:46 2001
+++ linux-mips-rwsem/arch/mips/config.in	Wed Jul 11 14:38:19 2001
@@ -68,8 +68,8 @@
    fi
 bool 'Support for Alchemy Semi PB1000 board' CONFIG_MIPS_PB1000
 
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
+define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
 
 #
 # Select some configuration options automatically for certain systems.
diff -uNr -x CVS -x TAGS linux-2.4-mips/include/asm-mips/rwsem.h linux-mips-rwsem/include/asm-mips/rwsem.h
--- linux-2.4-mips/include/asm-mips/rwsem.h	Thu Jan  1 01:00:00 1970
+++ linux-mips-rwsem/include/asm-mips/rwsem.h	Wed Jul 11 14:35:09 2001
@@ -0,0 +1,206 @@
+/* rwsem.h: R/W semaphores implemented using MIPS32 LL/SC
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ *
+ * Derived from:
+ * - asm-i386/rwsem.h: written by David Howells (dhowells@redhat.com).
+ * - asm-alpha/rwsem.h: written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
+ */
+#ifndef _ASM_RWSEM_H
+#define _ASM_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
+#endif
+
+#ifdef __KERNEL__
+
+#include <asm/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+	signed long			count;
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_ACTIVE_MASK		0x0000ffffL
+#define RWSEM_WAITING_BIAS		(-0x00010000L)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+	spinlock_t		wait_lock;
+	struct list_head	wait_list;
+#if RWSEM_DEBUG
+	int			debug;
+#endif
+};
+
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT      , 0
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	ll	%0,%1\n"
+	"	addiu	%2,%0,%3\n"
+	"	sc	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 sync	\n"
+	"	.set	reorder"
+	: "=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	: "I"(RWSEM_ACTIVE_READ_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(oldcount < 0, 0))
+		rwsem_down_read_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	ll	%0,%1\n"
+	"	addu	%2,%0,%3\n"
+	"	sc	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 sync	\n"
+	"	.set	reorder"
+	: "=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	: "r" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(oldcount, 0))
+		rwsem_down_write_failed(sem);
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	ll	%0,%1\n"
+	"	addiu	%2,%0,%3\n"	/* no dsubiu */
+	"	sc	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 sync	\n"
+	"	.set	reorder"
+	: "=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	: "I" (-RWSEM_ACTIVE_READ_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(oldcount < 0, 0)) 
+		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
+			rwsem_wake(sem);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	long count;
+#ifndef	CONFIG_SMP
+	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	ll	%0,%1\n"
+	"	subu	%2,%0,%3\n"
+	"	sc	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 dsubu	%0,%3,%0\n"
+	"	sync	\n"
+	"	.set	reorder"
+	: "=&r" (count), "=m" (sem->count), "=&r" (temp)
+	: "I" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(count, 0))
+		if ((int)count == 0)
+			rwsem_wake(sem);
+}
+
+static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
+{
+#ifndef	CONFIG_SMP
+	sem->count += val;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1:	ll	%0,%1\n"
+	"	addu	%0,%2,%0\n"
+	"	sc	%0,%1\n"
+	"	beq	%0,0,1b\n"
+	"	 nop	\n"
+	:"=&r" (temp), "=m" (sem->count)
+	:"r" (val), "m" (sem->count));
+#endif
+}
+
+static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
+{
+#ifndef	CONFIG_SMP
+	sem->count += val;
+	return sem->count;
+#else
+	long ret, temp;
+	__asm__ __volatile__(
+	"1:	ll	%0,%1\n"
+	"	addu 	%2,%0,%3\n"
+	"	sc	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 addu	%0,%3,%0\n"
+	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (val), "m" (sem->count));
+
+	return ret;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_RWSEM_H */

[-- Attachment #3: MIPS64 optimised rwsem patch --]
[-- Type: text/plain, Size: 5716 bytes --]

diff -uNr -x CVS -x TAGS linux-2.4-mips/arch/mips64/config.in linux-mips-rwsem/arch/mips64/config.in
--- linux-2.4-mips/arch/mips64/config.in	Thu Jun 28 13:59:11 2001
+++ linux-mips-rwsem/arch/mips64/config.in	Wed Jul 11 14:38:25 2001
@@ -27,8 +27,8 @@
 fi
 endmenu
 
-define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
-define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
+define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
+define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
 
 #
 # Select some configuration options automatically based on user selections
diff -uNr -x CVS -x TAGS linux-2.4-mips/include/asm-mips64/rwsem.h linux-mips-rwsem/include/asm-mips64/rwsem.h
--- linux-2.4-mips/include/asm-mips64/rwsem.h	Thu Jan  1 01:00:00 1970
+++ linux-mips-rwsem/include/asm-mips64/rwsem.h	Wed Jul 11 14:35:48 2001
@@ -0,0 +1,206 @@
+/* rwsem.h: R/W semaphores implemented using MIPS64 LLD/SCD
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ *
+ * Derived from:
+ * - asm-i386/rwsem.h: written by David Howells (dhowells@redhat.com).
+ * - asm-alpha/rwsem.h: written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
+ */
+#ifndef _ASM_RWSEM_H
+#define _ASM_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
+#endif
+
+#ifdef __KERNEL__
+
+#include <asm/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+	signed long			count;
+#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
+#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
+#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
+#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+	spinlock_t		wait_lock;
+	struct list_head	wait_list;
+#if RWSEM_DEBUG
+	int			debug;
+#endif
+};
+
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT      , 0
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	lld	%0,%1\n"
+	"	daddiu	%2,%0,%3\n"
+	"	scd	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 sync	\n"
+	"	.set	reorder"
+	: "=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	: "I"(RWSEM_ACTIVE_READ_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(oldcount < 0, 0))
+		rwsem_down_read_failed(sem);
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	lld	%0,%1\n"
+	"	daddu	%2,%0,%3\n"
+	"	scd	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 sync	\n"
+	"	.set	reorder"
+	: "=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	: "r" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(oldcount, 0))
+		rwsem_down_write_failed(sem);
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef	CONFIG_SMP
+	oldcount = sem->count;
+	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	lld	%0,%1\n"
+	"	daddiu	%2,%0,%3\n"	/* no dsubiu */
+	"	scd	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 sync	\n"
+	"	.set	reorder"
+	: "=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	: "I" (-RWSEM_ACTIVE_READ_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(oldcount < 0, 0)) 
+		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
+			rwsem_wake(sem);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	long count;
+#ifndef	CONFIG_SMP
+	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"	.set	noreorder\n"
+	"1:	lld	%0,%1\n"
+	"	dsubu	%2,%0,%3\n"
+	"	scd	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 dsubu	%0,%3,%0\n"
+	"	sync	\n"
+	"	.set	reorder"
+	: "=&r" (count), "=m" (sem->count), "=&r" (temp)
+	: "I" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count)
+	: "memory");
+#endif
+	if (__builtin_expect(count, 0))
+		if ((int)count == 0)
+			rwsem_wake(sem);
+}
+
+static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
+{
+#ifndef	CONFIG_SMP
+	sem->count += val;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1:	lld	%0,%1\n"
+	"	daddu	%0,%2,%0\n"
+	"	scd	%0,%1\n"
+	"	beq	%0,0,1b\n"
+	"	 nop	\n"
+	:"=&r" (temp), "=m" (sem->count)
+	:"r" (val), "m" (sem->count));
+#endif
+}
+
+static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
+{
+#ifndef	CONFIG_SMP
+	sem->count += val;
+	return sem->count;
+#else
+	long ret, temp;
+	__asm__ __volatile__(
+	"1:	lld	%0,%1\n"
+	"	daddu 	%2,%0,%3\n"
+	"	scd	%2,%1\n"
+	"	beq	%2,0,1b\n"
+	"	 daddu	%0,%3,%0\n"
+	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (val), "m" (sem->count));
+
+	return ret;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_RWSEM_H */

[-- Attachment #4: 64-bit rwsem fix for older kernels --]
[-- Type: text/plain, Size: 469 bytes --]

diff -uNr -x CVS -x TAGS linux-2.4-mips/lib/rwsem.c linux-mips-rwsem/lib/rwsem.c
--- linux-2.4-mips/lib/rwsem.c	Mon Apr 30 09:55:41 2001
+++ linux-mips-rwsem/lib/rwsem.c	Wed Jul 11 14:04:02 2001
@@ -112,7 +112,7 @@
  */
 static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
 								 struct rwsem_waiter *waiter,
-								 __s32 adjustment)
+								 signed long adjustment)
 {
 	struct task_struct *tsk = current;
 	signed long count;

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: optimised rw-semaphores for MIPS/MIPS64
  2001-07-11 14:00 optimised rw-semaphores for MIPS/MIPS64 David Howells
@ 2001-07-11 14:18 ` David Howells
  2001-07-11 16:41 ` Andrea Arcangeli
  1 sibling, 0 replies; 3+ messages in thread
From: David Howells @ 2001-07-11 14:18 UTC (permalink / raw)
  To: ralf; +Cc: linux-mips, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 57 bytes --]

You'll also need the asm-mips*/compiler.h patch.

David


[-- Attachment #2: compiler smoothing header files patch --]
[-- Type: text/plain, Size: 1484 bytes --]

diff -uNr -x CVS -x TAGS linux-2.4-mips/include/asm-mips/compiler.h linux-mips-rwsem/include/asm-mips/compiler.h
--- linux-2.4-mips/include/asm-mips/compiler.h	Thu Jan  1 01:00:00 1970
+++ linux-mips-rwsem/include/asm-mips/compiler.h	Wed Jul 11 15:12:33 2001
@@ -0,0 +1,13 @@
+#ifndef __ASM_COMPILER_H
+#define __ASM_COMPILER_H
+
+/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
+   a mechanism by which the user can annotate likely branch directions and
+   expect the blocks to be reordered appropriately.  Define __builtin_expect
+   to nothing for earlier compilers.  */
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+
+#endif /* __ASM_COMPILER_H */
diff -uNr -x CVS -x TAGS linux-2.4-mips/include/asm-mips64/compiler.h linux-mips-rwsem/include/asm-mips64/compiler.h
--- linux-2.4-mips/include/asm-mips64/compiler.h	Thu Jan  1 01:00:00 1970
+++ linux-mips-rwsem/include/asm-mips64/compiler.h	Wed Jul 11 15:12:05 2001
@@ -0,0 +1,13 @@
+#ifndef __ASM_COMPILER_H
+#define __ASM_COMPILER_H
+
+/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
+   a mechanism by which the user can annotate likely branch directions and
+   expect the blocks to be reordered appropriately.  Define __builtin_expect
+   to nothing for earlier compilers.  */
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+
+#endif /* __ASM_COMPILER_H */

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: optimised rw-semaphores for MIPS/MIPS64
  2001-07-11 14:00 optimised rw-semaphores for MIPS/MIPS64 David Howells
  2001-07-11 14:18 ` David Howells
@ 2001-07-11 16:41 ` Andrea Arcangeli
  1 sibling, 0 replies; 3+ messages in thread
From: Andrea Arcangeli @ 2001-07-11 16:41 UTC (permalink / raw)
  To: David Howells; +Cc: ralf, linux-mips, linux-kernel, Linus Torvalds

On Wed, Jul 11, 2001 at 03:00:47PM +0100, David Howells wrote:
> Hello Ralf,
> 
> I've produced an inline-assembly optimised Read/Write Semaphore patch for MIPS
> against linux-2.4.7-pre6 (rwsem.diff). David Woodhouse has tested it.

I don't understand why you keep writing code on top of your
mathematically slower rwsem framework.

the C version is much, much slower, measured several times.

The asm version has an inferior slow path design that forces you to
do slower and quite tricky stuff in the up_write fast path. This is your
up_write fast path:

		"  movl      %2,%%edx\n\t"
LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
		"  jnz       2f\n\t" /* jump if the lock is being waited upon */

Plus you also clobber %dx in the up_write fast path which I don't need
to.

This is my up_write fast path:

	__asm__ __volatile__(LOCK "subl %2, %0\n\t"
			     "js 2f\n"

Also the design of my xadd slow path looks more readable to me but ok,
I'm biased about that, let's only care about the number of cycles you
have to spend on the fast paths as measure of the goodness of the
algorithm.

I'd suggest arch maintainers to port their rwsem asm optimized fast
paths on top of this patch and to submit me patches:

	ftp://ftp.us.kernel.org/pub/linux/kernel/people/andrea/kernels/v2.4/2.4.7pre5aa1/00_rwsem-14

I also recommend Linus to include the above patch into mainline (it will
reject on the alpha since in pre6 there is the asm optimized version on
top of the slower framework, you can simply temporary disable the asm
optimized version in alpha/config.in and it will compile just fine).

I have an old one from Ivan for alpha which I can just integrate after
auditing and comparison with the one in pre6, the port of the others
should be fairly easy too.

If arch maintainers will do it and it will be included in mainline this
would save me some work and would make the kernel faster.  If it won't
be included again for whatever reason (I got no reply last times) I will
just spend a day on it and I'll port all ports on top of the new code
myself and I'll maintain the faster rwsem in my tree for all archs in
the kernel (possibly with the exception of the ones that are not
converted to the xchgadd asm version yet) as I did so far for x86.

Andrea

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2001-07-11 16:46 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2001-07-11 14:00 optimised rw-semaphores for MIPS/MIPS64 David Howells
2001-07-11 14:18 ` David Howells
2001-07-11 16:41 ` Andrea Arcangeli

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox