* [GIT PULL] More cleanups for atomic memory operations/spinlocks
@ 2011-11-09  1:17 Jeremy Fitzhardinge
  2011-11-11 20:03 ` Jeremy Fitzhardinge
  2011-12-05  9:56 ` Ingo Molnar
  0 siblings, 2 replies; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2011-11-09  1:17 UTC
  To: H. Peter Anvin, Ingo Molnar
  Cc: the arch/x86 maintainers, Linux Kernel Mailing List,
	xen-devel@lists.xensource.com, Konrad Rzeszutek Wilk

I forgot to push these for the just-closed merge window, but they're
fine for the next one.  Could you find them a home in tip.git?

Thanks,
    J

The following changes since commit 1ea6b8f48918282bdca0b32a34095504ee65bab5:

  Linux 3.2-rc1 (2011-11-07 16:16:02 -0800)

are available in the git repository at:
  git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen.git upstream/ticketlock-cleanup

Jeremy Fitzhardinge (2):
      x86/cmpxchg: add a locked add() helper
      x86: consolidate xchg and xadd macros

 arch/x86/include/asm/cmpxchg.h  |  140 +++++++++++++++++++-------------------
 arch/x86/include/asm/spinlock.h |   15 +----
 2 files changed, 71 insertions(+), 84 deletions(-)
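
Before the diff itself, a minimal usage sketch of the helpers this series
touches.  The callers below are hypothetical; only xchg(), xadd() and the
new add_smp() come from the patched header, and it assumes a kernel
context where <asm/cmpxchg.h> is in scope:

#include <asm/cmpxchg.h>

/* Hypothetical callers -- only the helper names come from the patch. */

static unsigned long swap_slot(unsigned long *slot, unsigned long newval)
{
	/* xchg(): unconditional swap, returns the old value; implicitly locked. */
	return xchg(slot, newval);
}

static unsigned int take_ticket(unsigned int *next_ticket)
{
	/* xadd(): atomically add and return the previous value (LOCK on SMP). */
	return xadd(next_ticket, 1);
}

static void bump_counter(unsigned long *counter)
{
	/* add_smp(): the new locked add helper; the old value is not needed. */
	add_smp(counter, 1);
}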

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5d3acdf..5488e10 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
 	__compiletime_error("Bad argument size for cmpxchg");
 extern void __xadd_wrong_size(void)
 	__compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+	__compiletime_error("Bad argument size for add");
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size is set to
@@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
 #define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
 #endif
 
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock)					\
+	({								\
+	        __typeof__ (*(ptr)) __ret = (arg);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock #op "b %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock #op "w %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock #op "l %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock #op "q %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__ ## op ## _wrong_size();			\
+		}							\
+		__ret;							\
+	})
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
  * Since this is generally used to protect other memory information, we
  * use "asm volatile" and "memory" clobbers to prevent gcc from moving
  * information around.
  */
-#define __xchg(x, ptr, size)						\
-({									\
-	__typeof(*(ptr)) __x = (x);					\
-	switch (size) {							\
-	case __X86_CASE_B:						\
-	{								\
-		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
-		asm volatile("xchgb %0,%1"				\
-			     : "=q" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_W:						\
-	{								\
-		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
-		asm volatile("xchgw %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_L:						\
-	{								\
-		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
-		asm volatile("xchgl %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	case __X86_CASE_Q:						\
-	{								\
-		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
-		asm volatile("xchgq %0,%1"				\
-			     : "=r" (__x), "+m" (*__ptr)		\
-			     : "0" (__x)				\
-			     : "memory");				\
-		break;							\
-	}								\
-	default:							\
-		__xchg_wrong_size();					\
-	}								\
-	__x;								\
-})
-
-#define xchg(ptr, v)							\
-	__xchg((v), (ptr), sizeof(*ptr))
+#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
@@ -165,46 +154,57 @@ extern void __xadd_wrong_size(void)
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
-#define __xadd(ptr, inc, lock)						\
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock)						\
 	({								\
 	        __typeof__ (*(ptr)) __ret = (inc);			\
 		switch (sizeof(*(ptr))) {				\
 		case __X86_CASE_B:					\
-			asm volatile (lock "xaddb %b0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addb %b1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_W:					\
-			asm volatile (lock "xaddw %w0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addw %w1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_L:					\
-			asm volatile (lock "xaddl %0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addl %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		case __X86_CASE_Q:					\
-			asm volatile (lock "xaddq %q0, %1\n"		\
-				      : "+r" (__ret), "+m" (*(ptr))	\
-				      : : "memory", "cc");		\
+			asm volatile (lock "addq %1, %0\n"		\
+				      : "+m" (*(ptr)) : "ri" (inc)	\
+				      : "memory", "cc");		\
 			break;						\
 		default:						\
-			__xadd_wrong_size();				\
+			__add_wrong_size();				\
 		}							\
 		__ret;							\
 	})
 
 /*
- * xadd() adds "inc" to "*ptr" and atomically returns the previous
- * value of "*ptr".
+ * add_*() adds "inc" to "*ptr"
  *
- * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
  */
-#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
 
 #endif	/* ASM_X86_CMPXCHG_H */
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 972c260..a82c2bf 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-#if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
+	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
 }
-#else
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
-{
-	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
-		     : "+m" (lock->head_tail)
-		     :
-		     : "memory", "cc");
-}
-#endif
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {

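A side note on the spinlock.h hunk: with the lock's head kept as its own
sub-word field, release becomes a single sized add, which is why the old
NR_CPUS < 256 incb/incw split can go away.  A standalone sketch of that
idea, assuming a byte-sized head as in the NR_CPUS < 256 configuration
(the struct is illustrative only, not the kernel's arch_spinlock_t, and
the lock-prefix handling is simplified to the common case where
UNLOCK_LOCK_PREFIX is empty):

#include <stdint.h>

struct ticketlock {
	union {
		uint16_t head_tail;
		struct {
			uint8_t head;	/* ticket currently being served */
			uint8_t tail;	/* next ticket to be handed out */
		} tickets;
	};
};

static inline void ticket_unlock(struct ticketlock *lock)
{
	/*
	 * One-byte operand, so this corresponds to the "addb" case of the
	 * __add() switch above; kernels that need the PPro/OOSTORE
	 * workaround would put a lock prefix in front via UNLOCK_LOCK_PREFIX.
	 */
	asm volatile("addb $1, %0"
		     : "+m" (lock->tickets.head)
		     : : "memory", "cc");
}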



* Re: [GIT PULL] More cleanups for atomic memory operations/spinlocks
  2011-11-09  1:17 [GIT PULL] More cleanups for atomic memory operations/spinlocks Jeremy Fitzhardinge
@ 2011-11-11 20:03 ` Jeremy Fitzhardinge
  2011-12-05  9:56 ` Ingo Molnar
  1 sibling, 0 replies; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2011-11-11 20:03 UTC
  To: H. Peter Anvin, Ingo Molnar
  Cc: the arch/x86 maintainers, Linux Kernel Mailing List,
	xen-devel@lists.xensource.com, Konrad Rzeszutek Wilk

Ping?

On 11/08/2011 05:17 PM, Jeremy Fitzhardinge wrote:
> I forgot to push these for the just-closed merge window, but they're
> fine for the next one.  Could you find them a home in tip.git?
>
> Thanks,
>     J
>
> The following changes since commit 1ea6b8f48918282bdca0b32a34095504ee65bab5:
>
>   Linux 3.2-rc1 (2011-11-07 16:16:02 -0800)
>
> are available in the git repository at:
>   git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen.git upstream/ticketlock-cleanup
>
> Jeremy Fitzhardinge (2):
>       x86/cmpxchg: add a locked add() helper
>       x86: consolidate xchg and xadd macros
>
>  arch/x86/include/asm/cmpxchg.h  |  140 +++++++++++++++++++-------------------
>  arch/x86/include/asm/spinlock.h |   15 +----
>  2 files changed, 71 insertions(+), 84 deletions(-)
>
> [...]



* Re: [GIT PULL] More cleanups for atomic memory operations/spinlocks
  2011-11-09  1:17 [GIT PULL] More cleanups for atomic memory operations/spinlocks Jeremy Fitzhardinge
  2011-11-11 20:03 ` Jeremy Fitzhardinge
@ 2011-12-05  9:56 ` Ingo Molnar
  2011-12-05 10:00   ` H. Peter Anvin
  1 sibling, 1 reply; 4+ messages in thread
From: Ingo Molnar @ 2011-12-05  9:56 UTC
  To: Jeremy Fitzhardinge
  Cc: H. Peter Anvin, the arch/x86 maintainers,
	Linux Kernel Mailing List, xen-devel@lists.xensource.com,
	Konrad Rzeszutek Wilk


* Jeremy Fitzhardinge <jeremy@goop.org> wrote:

> I forgot to push these for the just-closed merge window, but they're
> fine for the next one.  Could you find them a home in tip.git?
> 
> Thanks,
>     J
> 
> The following changes since commit 1ea6b8f48918282bdca0b32a34095504ee65bab5:
> 
>   Linux 3.2-rc1 (2011-11-07 16:16:02 -0800)
> 
> are available in the git repository at:
>   git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen.git upstream/ticketlock-cleanup
> 
> Jeremy Fitzhardinge (2):
>       x86/cmpxchg: add a locked add() helper
>       x86: consolidate xchg and xadd macros
> 
>  arch/x86/include/asm/cmpxchg.h  |  140 +++++++++++++++++++-------------------
>  arch/x86/include/asm/spinlock.h |   15 +----
>  2 files changed, 71 insertions(+), 84 deletions(-)

Pulled it into x86/asm for testing, thanks Jeremy.

Peter, any objections?

Thanks,

	Ingo


* Re: [GIT PULL] More cleanups for atomic memory operations/spinlocks
  2011-12-05  9:56 ` Ingo Molnar
@ 2011-12-05 10:00   ` H. Peter Anvin
  0 siblings, 0 replies; 4+ messages in thread
From: H. Peter Anvin @ 2011-12-05 10:00 UTC
  To: Ingo Molnar
  Cc: Jeremy Fitzhardinge, the arch/x86 maintainers,
	Linux Kernel Mailing List, xen-devel@lists.xensource.com,
	Konrad Rzeszutek Wilk

On 12/05/2011 01:56 AM, Ingo Molnar wrote:
> 
> Pulled it into x86/asm for testing, thanks Jeremy.
> 
> Peter, any objections?
> 

No.  Thank you!

	-hpa


-- 
H. Peter Anvin, Intel Open Source Technology Center
I work for Intel.  I don't speak on their behalf.


