From: Jeremy Fitzhardinge <jeremy@xensource.com>
To: akpm@osdl.org
Cc: linux-kernel@vger.kernel.org, virtualization@lists.osdl.org,
	xen-devel@lists.xensource.com,
	Jeremy Fitzhardinge <jeremy@goop.org>,
	Ian Pratt <ian.pratt@xensource.com>,
	Christian Limpach <Christian.Limpach@cl.cam.ac.uk>,
	Chris Wright <chrisw@sous-sol.org>,
	Christoph Lameter <clameter@sgi.com>
Subject: [patch 2/8] Implement always-locked bit ops, for memory shared with an SMP hypervisor.
Date: Wed, 02 Aug 2006 17:25:12 -0700
Message-ID: <20060803002518.061401577@xensource.com>
In-Reply-To: <20060803002510.634721860@xensource.com>

[-- Attachment #1: 002-sync-bitops.patch --]
[-- Type: text/plain, Size: 7018 bytes --]

Add "always lock'd" implementations of set_bit, clear_bit and
change_bit, and the corresponding test_and_ functions.  Also add an
"always lock'd" implementation of cmpxchg.  These give guaranteed
strong synchronisation and are required for non-SMP kernels running on
an SMP hypervisor.
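
For example (a sketch only; the shared page, bit index and handler
below are hypothetical, not part of any real interface): on a
!CONFIG_SMP guest, plain test_and_clear_bit() compiles without the
lock prefix and can therefore race with the hypervisor's other CPUs,
whereas the sync_ variant remains safe:

	/* flags word living in a page shared with the hypervisor */
	extern volatile unsigned long *shared_flags;	/* hypothetical */
	#define EVT_PENDING	0			/* hypothetical bit */

	if (sync_test_and_clear_bit(EVT_PENDING, shared_flags))
		process_event();		/* hypothetical handler */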

Signed-off-by: Ian Pratt <ian.pratt@xensource.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Christoph Lameter <clameter@sgi.com>

---
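sync_cmpxchg() takes the same arguments as cmpxchg().  A usage sketch
(the descriptor layout and status values below are hypothetical, not
from any real interface):

	/* descriptor living in hypervisor-shared memory */
	struct shared_desc {			/* hypothetical */
		unsigned int status;
	};
	#define STAT_FREE	0		/* hypothetical */
	#define STAT_TAKEN	1		/* hypothetical */

	struct shared_desc *desc;		/* maps the shared page */

	/* Claim the descriptor with a guaranteed-locked cmpxchg; this
	   is safe even when the guest kernel is built !CONFIG_SMP. */
	if (sync_cmpxchg(&desc->status, STAT_FREE, STAT_TAKEN) == STAT_FREE)
		/* we now own the descriptor */;
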
 include/asm-i386/sync_bitops.h |  156 ++++++++++++++++++++++++++++++++++++++++
 include/asm-i386/system.h      |   36 +++++++++
 2 files changed, 192 insertions(+)


===================================================================
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -261,6 +261,9 @@ static inline unsigned long __xchg(unsig
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -282,6 +285,39 @@ static inline unsigned long __cmpxchg(vo
 		return prev;
 	case 4:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
 				     : "=a"(prev)
 				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
===================================================================
--- /dev/null
+++ b/include/asm-i386/sync_bitops.h
@@ -0,0 +1,156 @@
+#ifndef _I386_SYNC_BITOPS_H
+#define _I386_SYNC_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic.  All test_and_ operations return 0 if the
+ * bit was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#define ADDR (*(volatile long *) addr)
+
+/**
+ * sync_set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btsl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * sync_clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btrl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * sync_change_bit() is atomic and may not be reordered on x86.
+ * It may be reordered on other architectures.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+{
+	__asm__ __volatile__("lock; btcl %1,%0"
+			     :"+m" (ADDR)
+			     :"Ir" (nr)
+			     : "memory");
+}
+
+/**
+ * sync_test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered on x86.
+ * It may be reordered on other architectures.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * sync_test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered on x86.
+ * It may be reordered on other architectures.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+/**
+ * sync_test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit),"+m" (ADDR)
+			     :"Ir" (nr) : "memory");
+	return oldbit;
+}
+
+static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) &
+		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr)
+{
+	int oldbit;
+
+	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
+			     :"=r" (oldbit)
+			     :"m" (ADDR),"Ir" (nr));
+	return oldbit;
+}
+
+#define sync_test_bit(nr,addr)			\
+	(__builtin_constant_p(nr) ?		\
+	 sync_constant_test_bit((nr),(addr)) :	\
+	 sync_var_test_bit((nr),(addr)))
+
+#undef ADDR
+
+#endif /* _I386_SYNC_BITOPS_H */
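
For reference, the test_and_* helpers above recover the old bit from
the carry flag: bt{s,r,c}l sets CF to the bit's previous value, and
"sbbl %0,%0" then computes oldbit = oldbit - oldbit - CF, leaving 0
if the bit was clear and -1 (all ones) if it was set; hence the
nonzero-iff-previously-set return convention documented above.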

--

