public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* Bit operations with the ability to specify a synchronization mode
@ 2006-03-30 21:02 Christoph Lameter
  2006-03-31  0:18 ` Synchronizing Bit operations V2 Christoph Lameter
  0 siblings, 1 reply; 62+ messages in thread
From: Christoph Lameter @ 2006-03-30 21:02 UTC (permalink / raw)
  To: Nick Piggin
  Cc: Zoltan Menyhart, Boehm, Hans, Grundler, Grant G, Chen, Kenneth W,
	akpm, linux-kernel, linux-ia64

The following patchset implements the ability to specify a
synchronization mode for bit operations.

I.e. instead of set_bit(x,y) we can do set_bit(x,y, mode).

The following modes are supported:

MODE_NON_ATOMIC
	Use non atomic version.
	F.e. set_bit(x,y, MODE_NON_ATOMIC) == __set_bit(x,y)

MODE_ATOMIC
	The operation is atomic but there is no guarantee how this
	operation is ordered respective to other memory operations.

MODE_ACQUIRE
	An atomic operation that is guaranteed to occur before
	all subsequent memory accesses

MODE_RELEASE
	An atomic operation that is guaranteed to occur after
	all previous memory accesses.

MODE_BARRIER
	An atomic operation that is guaranteed to occur between
	previous and later memory operations.

For architectures that have no support for bitops with modes we
fall back to some combination of memory barriers and atomic ops.

This patchset defines architecture support for only IA64.
Others could be done in a similar fashion.

Note that the current semantics for bitops IA64 are broken. Both
smp_mb__after/before_clear_bit are now set to full memory barriers
to compensate which may affect performance.

The kernel core code would need to be fixed to add the proper
synchronization modes to restore prior performance (with then
correct locking semantics). If kernel code wants to use synchronization
modes then an

#include <asm/bitops_mode.h>

needs to be added.

Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.16-mm2/include/asm-ia64/bitops.h
===================================================================
--- linux-2.6.16-mm2.orig/include/asm-ia64/bitops.h	2006-03-19 21:53:29.000000000 -0800
+++ linux-2.6.16-mm2/include/asm-ia64/bitops.h	2006-03-30 12:50:15.000000000 -0800
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/bitops_mode.h>
 #include <asm/bitops.h>
 #include <asm/intrinsics.h>
 
@@ -19,8 +20,6 @@
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  *
@@ -34,244 +33,106 @@
 static __inline__ void
 set_bit (int nr, volatile void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = 1 << (nr & 31);
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old | bit;
-	} while (cmpxchg_acq(m, old, new) != old);
+	set_bit_mode(nr, addr, MODE_ATOMIC);
 }
 
 /**
  * __set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
  */
 static __inline__ void
 __set_bit (int nr, volatile void *addr)
 {
-	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+	set_bit_mode(nr, addr, MODE_NON_ATOMIC);
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
 #define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
+#define smp_mb__after_clear_bit()	smp_mb()
 
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
 clear_bit (int nr, volatile void *addr)
 {
-	__u32 mask, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	mask = ~(1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old & mask;
-	} while (cmpxchg_acq(m, old, new) != old);
+	clear_bit_mode(nr, addr, MODE_ATOMIC);
 }
 
-/**
- * __clear_bit - Clears a bit in memory (non-atomic version)
- */
 static __inline__ void
 __clear_bit (int nr, volatile void *addr)
 {
-	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	*p &= ~m;
+	clear_bit_mode(nr, addr, MODE_NON_ATOMIC);
 }
 
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
  */
 static __inline__ void
 change_bit (int nr, volatile void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = (1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old ^ bit;
-	} while (cmpxchg_acq(m, old, new) != old);
+	change_bit_mode(nr, addr, MODE_ATOMIC);
 }
 
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
 static __inline__ void
 __change_bit (int nr, volatile void *addr)
 {
-	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+	change_bit_mode(nr, addr, MODE_NON_ATOMIC);
 }
 
 /**
  * test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
  */
 static __inline__ int
 test_and_set_bit (int nr, volatile void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = 1 << (nr & 31);
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old | bit;
-	} while (cmpxchg_acq(m, old, new) != old);
-	return (old & bit) != 0;
+	return test_and_set_bit_mode(nr, addr, MODE_ATOMIC);
 }
 
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.  
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
 static __inline__ int
 __test_and_set_bit (int nr, volatile void *addr)
 {
-	__u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	int oldbitset = (*p & m) != 0;
-
-	*p |= m;
-	return oldbitset;
+	return test_and_set_bit_mode(nr, addr, MODE_NON_ATOMIC);
 }
 
 /**
  * test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
  */
 static __inline__ int
 test_and_clear_bit (int nr, volatile void *addr)
 {
-	__u32 mask, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	mask = ~(1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old & mask;
-	} while (cmpxchg_acq(m, old, new) != old);
-	return (old & ~mask) != 0;
+	return test_and_clear_bit_mode(nr, addr, MODE_ATOMIC);
 }
 
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.  
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
 static __inline__ int
 __test_and_clear_bit(int nr, volatile void * addr)
 {
-	__u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	int oldbitset = *p & m;
-
-	*p &= ~m;
-	return oldbitset;
+	return test_and_clear_bit_mode(nr, addr, MODE_NON_ATOMIC);
 }
 
 /**
  * test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
  */
 static __inline__ int
 test_and_change_bit (int nr, volatile void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = (1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old ^ bit;
-	} while (cmpxchg_acq(m, old, new) != old);
-	return (old & bit) != 0;
+	return test_and_change_bit_mode(nr, addr, MODE_ATOMIC);
 }
 
-/*
- * WARNING: non atomic version.
- */
 static __inline__ int
 __test_and_change_bit (int nr, void *addr)
 {
-	__u32 old, bit = (1 << (nr & 31));
-	__u32 *m = (__u32 *) addr + (nr >> 5);
-
-	old = *m;
-	*m = old ^ bit;
-	return (old & bit) != 0;
+	return test_and_change_bit_mode(nr, addr, MODE_NON_ATOMIC);
 }
 
 static __inline__ int
Index: linux-2.6.16-mm2/include/asm-ia64/bitops_mode.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16-mm2/include/asm-ia64/bitops_mode.h	2006-03-30 12:50:15.000000000 -0800
@@ -0,0 +1,201 @@
+#ifndef _ASM_IA64_BITOPS_MODE_H
+#define _ASM_IA64_BITOPS_MODE_H
+
+/*
+ * Copyright (C) 2006 Silicon Graphics, Incorporated
+ *	Christoph Lameter <christoph@lameter.com>
+ *
+ * Bit operations with the ability to specify the synchronization mode
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+
+#define MODE_NON_ATOMIC 0
+#define MODE_ATOMIC 1
+#define MODE_ACQUIRE 2
+#define MODE_RELEASE 3
+#define MODE_BARRIER 4
+
+static __inline__ __u32 cmpxchg_mode(volatile __u32 *m, __u32 old, __u32 new, int mode)
+{
+	switch (mode) {
+	case MODE_ATOMIC :
+	default :	/* MODE_ACQUIRE; default label so the function cannot fall off without a return */
+		return cmpxchg_acq(m, old, new);
+
+	case MODE_RELEASE :
+		return cmpxchg_rel(m, old, new);
+
+	case MODE_BARRIER :
+		cmpxchg_rel(m, old, new);
+		return *m;	/* Volatile load = acquire */
+	}
+}
+
+
+/**
+ * set_bit_mode - set a bit in memory
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit_mode (int nr, volatile void *addr, int mode)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+
+	if (mode == MODE_NON_ATOMIC) {
+		*m |= bit;
+		return;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+}
+
+/**
+ * clear_bit_mode - Clears a bit in memory
+ */
+static __inline__ void
+clear_bit_mode (int nr, volatile void *addr, int mode)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+
+	if (mode == MODE_NON_ATOMIC) {
+		*m &= mask;	/* mask already has the target bit inverted */
+		return;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+}
+
+/**
+ * change_bit_mode - Toggle a bit in memory
+ */
+static __inline__ void
+change_bit_mode (int nr, volatile void *addr, int mode)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+
+	if (mode == MODE_NON_ATOMIC) {
+		*m ^= bit;
+		return;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+}
+
+/**
+ * test_and_set_bit_mode - Set a bit and return its old value
+ */
+static __inline__ int
+test_and_set_bit_mode (int nr, volatile void *addr, int mode)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+
+	if (mode == MODE_NON_ATOMIC) {
+		int oldbitset = *m & bit;
+		*m |= bit;
+		return oldbitset;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+	return (old & bit) != 0;
+}
+
+/**
+ * test_and_clear_bit_mode - Clear a bit and return its old value
+ */
+static __inline__ int
+test_and_clear_bit_mode (int nr, volatile void *addr, int mode)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+
+	if (mode == MODE_NON_ATOMIC) {
+		int oldbitset = *m & ~mask;	/* ~mask is the target bit */
+		*m &= mask;
+		return oldbitset;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+	return (old & ~mask) != 0;
+}
+
+/**
+ * test_and_change_bit_mode - Change a bit and return its old value
+ */
+static __inline__ int
+test_and_change_bit_mode (int nr, volatile void *addr, int mode)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+
+	if (mode == MODE_NON_ATOMIC) {
+		old = *m;
+		*m = old ^ bit;
+		return (old & bit) != 0;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+	return (old & bit) != 0;
+}
+
+#endif /* _ASM_IA64_BITOPS_MODE_H */
Index: linux-2.6.16-mm2/include/asm-generic/bitops_mode.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16-mm2/include/asm-generic/bitops_mode.h	2006-03-30 12:50:15.000000000 -0800
@@ -0,0 +1,220 @@
+#ifndef _ASM_GENERIC_BITOPS_MODE_H
+#define _ASM_GENERIC_BITOPS_MODE_H
+
+/*
+ * Copyright (C) 2006 Silicon Graphics, Incorporated
+ *	Christoph Lameter <christoph@lameter.com>
+ *
+ * Fallback logic for bit operations with synchronization mode
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+
+#define MODE_NON_ATOMIC 0
+#define MODE_ATOMIC 1
+#define MODE_ACQUIRE 2
+#define MODE_RELEASE 3
+#define MODE_BARRIER 4
+
+/**
+ * set_bit_mode - Set a bit in memory
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit_mode (int nr, volatile void *addr, int mode)
+{
+	switch (mode) {
+	case MODE_NON_ATOMIC:
+		__set_bit(nr,addr);
+		return;
+
+	case MODE_ATOMIC:
+		set_bit(nr,addr);
+		return;
+
+	case MODE_ACQUIRE:
+		set_bit(nr,addr);
+		smp_mb();
+		return;
+
+	case MODE_RELEASE:
+		smp_mb();
+		set_bit(nr,addr);
+		return;
+
+	case MODE_BARRIER:
+		smp_mb();
+		set_bit(nr,addr);
+		smp_mb();
+		return;
+	}
+}
+
+/**
+ * clear_bit_mode - Clears a bit in memory
+ */
+static __inline__ void
+clear_bit_mode (int nr, volatile void *addr, int mode)
+{
+	switch (mode) {
+	case MODE_NON_ATOMIC:
+		__clear_bit(nr,addr);
+		return;
+
+	case MODE_ATOMIC:
+		clear_bit(nr,addr);
+		return;
+
+	case MODE_ACQUIRE:
+		clear_bit(nr,addr);
+		smp_mb();
+		return;
+
+	case MODE_RELEASE:
+		smp_mb();
+		clear_bit(nr,addr);
+		return;
+
+	case MODE_BARRIER:
+		smp_mb();
+		clear_bit(nr,addr);
+		smp_mb();
+		return;
+	}
+}
+
+/**
+ * change_bit_mode - Toggle a bit in memory
+ */
+static __inline__ void
+change_bit_mode (int nr, volatile void *addr, int mode)
+{
+	switch (mode) {
+	case MODE_NON_ATOMIC:
+		__change_bit(nr,addr);
+		return;
+
+	case MODE_ATOMIC:
+		change_bit(nr,addr);
+		return;
+
+	case MODE_ACQUIRE:
+		change_bit(nr,addr);
+		smp_mb();
+		return;
+
+	case MODE_RELEASE:
+		smp_mb();
+		change_bit(nr,addr);
+		return;
+
+	case MODE_BARRIER:
+		smp_mb();
+		change_bit(nr,addr);
+		smp_mb();
+		return;
+	}
+}
+
+/**
+ * test_and_set_bit_mode - Set a bit and return its old value
+ */
+static __inline__ int
+test_and_set_bit_mode (int nr, volatile void *addr, int mode)
+{
+	int x;
+	switch (mode) {
+	case MODE_NON_ATOMIC:
+		return __test_and_set_bit(nr,addr);
+
+	case MODE_ATOMIC:
+	 	return test_and_set_bit(nr,addr);
+
+	case MODE_ACQUIRE:
+		x = test_and_set_bit(nr,addr);
+		smp_mb();
+		return x;
+
+	case MODE_RELEASE:
+		smp_mb();
+		return test_and_set_bit(nr,addr);
+
+	case MODE_BARRIER:
+		smp_mb();
+		x = test_and_set_bit(nr,addr);
+		smp_mb();
+		return x;
+	}
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ */
+static __inline__ int
+test_and_clear_bit_mode (int nr, volatile void *addr, int mode)
+{
+	int x;
+	switch (mode) {
+	case MODE_NON_ATOMIC:
+		return __test_and_clear_bit(nr,addr);
+
+	case MODE_ATOMIC:
+	 	return test_and_clear_bit(nr,addr);
+
+	case MODE_ACQUIRE:
+		x = test_and_clear_bit(nr,addr);
+		smp_mb();
+		return x;
+
+	case MODE_RELEASE:
+		smp_mb();
+		return test_and_clear_bit(nr,addr);
+
+	case MODE_BARRIER:
+		smp_mb();
+		x = test_and_clear_bit(nr,addr);
+		smp_mb();
+		return x;
+	}
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ */
+static __inline__ int
+test_and_change_bit_mode (int nr, volatile void *addr, int mode)
+{
+	int x;
+	switch (mode) {
+	case MODE_NON_ATOMIC:
+		return __test_and_change_bit(nr,addr);
+
+	case MODE_ATOMIC:
+	 	return test_and_change_bit(nr,addr);
+
+	case MODE_ACQUIRE:
+		x = test_and_change_bit(nr,addr);
+		smp_mb();
+		return x;
+
+	case MODE_RELEASE:
+		smp_mb();
+		return test_and_change_bit(nr,addr);
+
+	case MODE_BARRIER:
+		smp_mb();
+		x = test_and_change_bit(nr,addr);
+		smp_mb();
+		return x;
+	}
+}
+
+#endif /* _ASM_GENERIC_BITOPS_MODE_H */

^ permalink raw reply	[flat|nested] 62+ messages in thread
* RE: Synchronizing Bit operations V2
@ 2006-03-31  0:56 Luck, Tony
  2006-03-31  0:58 ` Christoph Lameter
  0 siblings, 1 reply; 62+ messages in thread
From: Luck, Tony @ 2006-03-31  0:56 UTC (permalink / raw)
  To: Christoph Lameter, David Mosberger-Tang
  Cc: Nick Piggin, Zoltan Menyhart, Boehm, Hans, Grundler, Grant G,
	Chen, Kenneth W, akpm, linux-kernel, linux-ia64

> Also some higher level functions may want to have the mode passed to them 
> as parameters. See f.e. include/linux/buffer_head.h. Without the 
> parameters you will have to maintain farms of definitions for all cases.

But if any part of the call chain from those higher level functions
down to these low level functions is not inline, then the compiler
won't be able to collapse out the "switch (mode)" ... so we'd end up
with a ton of extra object code.

-Tony

^ permalink raw reply	[flat|nested] 62+ messages in thread
* Re: Synchronizing Bit operations V2
@ 2006-03-31  1:33 linux
  2006-03-31  1:40 ` Christoph Lameter
  0 siblings, 1 reply; 62+ messages in thread
From: linux @ 2006-03-31  1:33 UTC (permalink / raw)
  To: clameter; +Cc: linux-ia64, linux-kernel

> The following patchset implements the ability to specify a
> synchronization mode for bit operations.
>
> I.e. instead of set_bit(x,y) we can do set_bit(x,y, mode).
> 
> The following modes are supported:

Yuck.

The only conceivable reason for passing the mode as a separate parameter is
- To change the mode dynamically at run time.
- To share common code when the sequence is long and mostly shared
  between the various modes (as in open(2) or ll_rw_block()).

I sincerely hope neither of those apply in this case.

On the downside, it's more typing and uglier than a series of

frob_bit_nonatomic()
	(probably temporarily or permanently aliased to frob_bit())
frob_bit_atomic()
frob_bit_acquire()
frob_bit_release()
frob_bit_barrier()

functions, and those also prevent you from doing something silly like
frob_bit(x, y, O_DIRECT).  Also, the MODE_ prefix might be wanted by
something else.

^ permalink raw reply	[flat|nested] 62+ messages in thread
* RE: Synchronizing Bit operations V2
@ 2006-03-31  2:51 Chen, Kenneth W
  2006-03-31  2:55 ` Christoph Lameter
  0 siblings, 1 reply; 62+ messages in thread
From: Chen, Kenneth W @ 2006-03-31  2:51 UTC (permalink / raw)
  To: Chen, Kenneth W, 'Christoph Lameter'
  Cc: 'Nick Piggin', 'Zoltan Menyhart',
	'Boehm, Hans', 'Grundler, Grant G', akpm,
	linux-kernel, linux-ia64

Chen, Kenneth W wrote on Thursday, March 30, 2006 6:45 PM
> Christoph Lameter wrote on Thursday, March 30, 2006 6:38 PM
> > > > Neither one is correct because there will always be one combination of 
> > > > clear_bit with these macros that does not generate the required memory 
> > > > barrier.
> > > 
> > > Can you give an example?  Which combination?
> > 
> > For Option(1)
> > 
> > smp_mb__before_clear_bit()
> > clear_bit(...)(
> 
> Sorry, you totally lost me.  It could me I'm extremely slow today.  For
> option (1), on ia64, clear_bit has release semantic already.  The comb
> of __before_clear_bit + clear_bit provides the required ordering.  Did
> I miss something?  By the way, we are talking about detail implementation
> on one specific architecture.  Not some generic concept that clear_bit
> has no ordering stuff in there.

By the way, this is the same thing on x86: look at include/asm-i386/bitops.h:

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

A simple compiler barrier, nothing but
#define barrier() __asm__ __volatile__("": : :"memory")

See, no memory ordering there, because clear_bit already has a LOCK prefix.

- Ken

^ permalink raw reply	[flat|nested] 62+ messages in thread

end of thread, other threads:[~2006-04-02  8:00 UTC | newest]

Thread overview: 62+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2006-03-30 21:02 Bit operations with the ability to specify a synchronization mode Christoph Lameter
2006-03-31  0:18 ` Synchronizing Bit operations V2 Christoph Lameter
2006-03-31  0:39   ` Chen, Kenneth W
2006-03-31  0:42     ` Christoph Lameter
2006-03-31  0:53       ` Chen, Kenneth W
2006-03-31  0:55         ` Christoph Lameter
2006-03-31  1:04           ` Chen, Kenneth W
2006-03-31  1:13             ` Christoph Lameter
2006-03-31  1:29               ` Chen, Kenneth W
2006-03-31  1:37                 ` Christoph Lameter
2006-03-31  2:35                   ` Chen, Kenneth W
2006-03-31  2:37                     ` Christoph Lameter
2006-03-31  2:45                       ` Chen, Kenneth W
2006-03-31  2:53                         ` Nick Piggin
2006-03-31  6:15                           ` Chen, Kenneth W
2006-03-31  7:34                             ` Nick Piggin
2006-03-31 18:57                               ` Chen, Kenneth W
2006-03-31 19:41                               ` Chen, Kenneth W
2006-03-31 21:15                                 ` Christoph Lameter
2006-03-31 21:24                                   ` Chen, Kenneth W
2006-03-31 21:28                                     ` Christoph Lameter
2006-04-01  2:16                                       ` Nick Piggin
2006-03-31  3:01                         ` Christoph Lameter
2006-03-31  3:10                           ` Chen, Kenneth W
2006-03-31  3:12                             ` Christoph Lameter
2006-03-31  3:14                               ` Chen, Kenneth W
2006-03-31  3:20                                 ` Christoph Lameter
2006-03-31  3:37                                   ` Chen, Kenneth W
2006-03-31  3:17                               ` Chen, Kenneth W
2006-03-31  3:23                               ` Chen, Kenneth W
2006-03-31  0:45     ` Christoph Lameter
2006-03-31  0:50       ` Chen, Kenneth W
2006-03-31  0:51         ` Christoph Lameter
2006-03-31  0:59           ` Chen, Kenneth W
2006-03-31  1:09             ` Christoph Lameter
2006-03-31  1:13               ` Chen, Kenneth W
2006-03-31  0:42   ` David Mosberger-Tang
2006-03-31  0:49     ` Christoph Lameter
2006-03-31  6:10     ` Chris Wright
2006-03-31  0:44   ` Nick Piggin
2006-03-31  3:28     ` Christoph Lameter
2006-03-31  4:12       ` Nick Piggin
2006-03-31 17:43         ` Christoph Lameter
2006-04-01  2:56           ` Nick Piggin
2006-03-31 13:28   ` Andi Kleen
2006-03-31 16:22     ` Hans Boehm
2006-03-31 16:37       ` Andi Kleen
2006-03-31 17:46         ` Christoph Lameter
2006-03-31 17:45     ` Christoph Lameter
2006-03-31 17:48       ` Andi Kleen
2006-03-31 17:56         ` Christoph Lameter
2006-04-02  7:54           ` Russell King
  -- strict thread matches above, loose matches on Subject: below --
2006-03-31  0:56 Luck, Tony
2006-03-31  0:58 ` Christoph Lameter
2006-04-02  7:59   ` Russell King
2006-03-31  1:33 linux
2006-03-31  1:40 ` Christoph Lameter
2006-03-31  2:51 Chen, Kenneth W
2006-03-31  2:55 ` Christoph Lameter
2006-03-31  3:02   ` Chen, Kenneth W
2006-03-31  3:08     ` Christoph Lameter
2006-03-31  3:11       ` Chen, Kenneth W

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox