xen-devel.lists.xenproject.org archive mirror
From: Ian Campbell <ian.campbell@citrix.com>
To: xen-devel@lists.xen.org
Cc: julien.grall@citrix.com, tim@xen.org,
	Ian Campbell <ian.campbell@citrix.com>,
	stefano.stabellini@eu.citrix.com
Subject: [PATCH 2/3] xen/arm64: resync atomics and spinlock asm with Linux
Date: Fri, 19 Jul 2013 16:20:09 +0100
Message-ID: <1374247210-20994-2-git-send-email-ian.campbell@citrix.com>
In-Reply-To: <1374247170.13645.100.camel@kazak.uk.xensource.com>

This picks up the changes from Linux commit 3a0310eb369a:
    arm64: atomics: fix grossly inconsistent asm constraints for exclusives

    Our uses of inline asm constraints for atomic operations are fairly
    wild and varied. We basically need to guarantee the following:

      1. Any instructions with barrier implications
         (load-acquire/store-release) have a "memory" clobber

      2. When performing exclusive accesses, the addressing mode is generated
         using the "Q" constraint

      3. Atomic blocks which use the condition flags have a "cc" clobber

    This patch addresses these concerns; as well as fixing the semantics of
    the code, it stops GCC complaining about impossible asm constraints.

    Signed-off-by: Will Deacon <will.deacon@arm.com>
    Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
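(For reference, not part of the diff: a minimal standalone sketch of the
constraint pattern being adopted, modelled directly on the atomic_add case
below; the function name is invented for illustration. The exclusive operand
uses the "Q" constraint, flag-using blocks clobber "cc", and the
acquire/release (ldaxr/stlxr) variants additionally clobber "memory".)

    /* Illustrative only: mirrors the post-patch atomic_add. */
    static inline void example_atomic_add(int i, atomic_t *v)
    {
        unsigned long tmp;
        int result;

        asm volatile("// example_atomic_add\n"
        "1:     ldxr    %w0, %2\n"      /* load-exclusive; address from the "Q" operand */
        "       add     %w0, %w0, %w3\n"
        "       stxr    %w1, %w0, %2\n" /* store-exclusive; %w1 holds the status flag */
        "       cbnz    %w1, 1b"        /* retry if the exclusive store failed */
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i)
        : "cc");                        /* ordered/return variants also clobber "memory" */
    }
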
 xen/include/asm-arm/arm64/atomic.h   |   66 +++++++++++++++---------------
 xen/include/asm-arm/arm64/spinlock.h |   48 +++++++++++-----------
 xen/include/asm-arm/arm64/system.h   |   74 +++++++++++++++++-----------------
 3 files changed, 94 insertions(+), 94 deletions(-)

diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
index 5e4ffed..a279755 100644
--- a/xen/include/asm-arm/arm64/atomic.h
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -33,12 +33,12 @@ static inline void atomic_add(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add\n"
-"1:	ldxr	%w0, [%3]\n"
-"	add	%w0, %w0, %w4\n"
-"	stxr	%w1, %w0, [%3]\n"
+"1:	ldxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -48,13 +48,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, [%3]\n"
-"	add	%w0, %w0, %w4\n"
-"	stlxr	%w1, %w0, [%3]\n"
+"1:	ldaxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -65,12 +65,12 @@ static inline void atomic_sub(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub\n"
-"1:	ldxr	%w0, [%3]\n"
-"	sub	%w0, %w0, %w4\n"
-"	stxr	%w1, %w0, [%3]\n"
+"1:	ldxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
 	: "cc");
 }
 
@@ -80,13 +80,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, [%3]\n"
-"	sub	%w0, %w0, %w4\n"
-"	stlxr	%w1, %w0, [%3]\n"
+"1:	ldaxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (result), "=&r" (tmp), "+o" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
 
 	return result;
 }
@@ -97,15 +97,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	int oldval;
 
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, [%3]\n"
-"	cmp	%w1, %w4\n"
+"1:	ldaxr	%w1, %2\n"
+"	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w5, [%3]\n"
+"	stlxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
-	: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
-	: "r" (&ptr->counter), "Ir" (old), "r" (new)
-	: "cc");
+	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
 
 	return oldval;
 }
@@ -115,12 +115,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long tmp, tmp2;
 
 	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	stxr	%w1, %0, [%3]\n"
+"1:	ldxr	%0, %2\n"
+"	bic	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
-	: "r" (addr), "Ir" (mask)
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+	: "Ir" (mask)
 	: "cc");
 }
 
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
index fe4c403..717f2fe 100644
--- a/xen/include/asm-arm/arm64/spinlock.h
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -31,8 +31,8 @@ static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
     ASSERT(_raw_spin_is_locked(lock));
 
     asm volatile(
-        "       stlr    %w1, [%0]\n"
-        : : "r" (&lock->lock), "r" (0) : "memory");
+        "       stlr    %w1, %0\n"
+        : "=Q" (lock->lock) : "r" (0) : "memory");
 }
 
 static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
@@ -40,13 +40,13 @@ static always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
     unsigned int tmp;
 
     asm volatile(
-        "       ldaxr   %w0, [%1]\n"
+        "       ldaxr   %w0, %1\n"
         "       cbnz    %w0, 1f\n"
-        "       stxr    %w0, %w2, [%1]\n"
+        "       stxr    %w0, %w2, %1\n"
         "1:\n"
-        : "=&r" (tmp)
-        : "r" (&lock->lock), "r" (1)
-        : "memory");
+        : "=&r" (tmp), "+Q" (lock->lock)
+        : "r" (1)
+        : "cc", "memory");
 
     return !tmp;
 }
@@ -62,14 +62,14 @@ static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
     unsigned int tmp, tmp2 = 1;
 
     asm volatile(
-        "       ldaxr   %w0, [%2]\n"
+        "       ldaxr   %w0, %2\n"
         "       add     %w0, %w0, #1\n"
         "       tbnz    %w0, #31, 1f\n"
-        "       stxr    %w1, %w0, [%2]\n"
+        "       stxr    %w1, %w0, %2\n"
         "1:\n"
-        : "=&r" (tmp), "+r" (tmp2)
-        : "r" (&rw->lock)
-        : "memory");
+        : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+        :
+        : "cc", "memory");
 
     return !tmp2;
 }
@@ -79,13 +79,13 @@ static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
     unsigned int tmp;
 
     asm volatile(
-        "       ldaxr   %w0, [%1]\n"
+        "       ldaxr   %w0, %1\n"
         "       cbnz    %w0, 1f\n"
-        "       stxr    %w0, %w2, [%1]\n"
+        "       stxr    %w0, %w2, %1\n"
         "1:\n"
-        : "=&r" (tmp)
-        : "r" (&rw->lock), "r" (0x80000000)
-        : "memory");
+        : "=&r" (tmp), "+Q" (rw->lock)
+        : "r" (0x80000000)
+        : "cc", "memory");
 
     return !tmp;
 }
@@ -95,20 +95,20 @@ static inline void _raw_read_unlock(raw_rwlock_t *rw)
     unsigned int tmp, tmp2;
 
     asm volatile(
-        "1:     ldxr    %w0, [%2]\n"
+        "    1: ldxr    %w0, %2\n"
         "       sub     %w0, %w0, #1\n"
-        "       stlxr   %w1, %w0, [%2]\n"
+        "       stlxr   %w1, %w0, %2\n"
         "       cbnz    %w1, 1b\n"
-        : "=&r" (tmp), "=&r" (tmp2)
-        : "r" (&rw->lock)
-        : "memory");
+        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+        :
+        : "cc", "memory");
 }
 
 static inline void _raw_write_unlock(raw_rwlock_t *rw)
 {
     asm volatile(
-        "       stlr    %w1, [%0]\n"
-        : : "r" (&rw->lock), "r" (0) : "memory");
+        "       stlr    %w1, %0\n"
+        : "=Q" (rw->lock) : "r" (0) : "memory");
 }
 
 #define _raw_rw_is_locked(x) ((x)->lock != 0)
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index 4e41913..d7e912f 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -28,39 +28,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
         switch (size) {
         case 1:
                 asm volatile("//        __xchg1\n"
-                "1:     ldaxrb  %w0, [%3]\n"
-                "       stlxrb  %w1, %w2, [%3]\n"
+                "1:     ldaxrb  %w0, %2\n"
+                "       stlxrb  %w1, %w3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         case 2:
                 asm volatile("//        __xchg2\n"
-                "1:     ldaxrh  %w0, [%3]\n"
-                "       stlxrh  %w1, %w2, [%3]\n"
+                "1:     ldaxrh  %w0, %2\n"
+                "       stlxrh  %w1, %w3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         case 4:
                 asm volatile("//        __xchg4\n"
-                "1:     ldaxr   %w0, [%3]\n"
-                "       stlxr   %w1, %w2, [%3]\n"
+                "1:     ldaxr   %w0, %2\n"
+                "       stlxr   %w1, %w3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         case 8:
                 asm volatile("//        __xchg8\n"
-                "1:     ldaxr   %0, [%3]\n"
-                "       stlxr   %w1, %2, [%3]\n"
+                "1:     ldaxr   %0, %2\n"
+                "       stlxr   %w1, %3, %2\n"
                 "       cbnz    %w1, 1b\n"
-                        : "=&r" (ret), "=&r" (tmp)
-                        : "r" (x), "r" (ptr)
-                        : "memory", "cc");
+                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
+                        : "r" (x)
+                        : "cc", "memory");
                 break;
         default:
                 __bad_xchg(ptr, size), ret = 0;
@@ -84,14 +84,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 1:
                 do {
                         asm volatile("// __cmpxchg1\n"
-                        "       ldxrb   %w1, [%2]\n"
+                        "       ldxrb   %w1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %w1, %w3\n"
                         "       b.ne    1f\n"
-                        "       stxrb   %w0, %w4, [%2]\n"
+                        "       stxrb   %w0, %w4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
+                                : "Ir" (old), "r" (new)
                                 : "cc");
                 } while (res);
                 break;
@@ -99,29 +99,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 2:
                 do {
                         asm volatile("// __cmpxchg2\n"
-                        "       ldxrh   %w1, [%2]\n"
+                        "       ldxrh   %w1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %w1, %w3\n"
                         "       b.ne    1f\n"
-                        "       stxrh   %w0, %w4, [%2]\n"
+                        "       stxrh   %w0, %w4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
-                                : "memory", "cc");
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
+                                : "Ir" (old), "r" (new)
+                                : "cc");
                 } while (res);
                 break;
 
         case 4:
                 do {
                         asm volatile("// __cmpxchg4\n"
-                        "       ldxr    %w1, [%2]\n"
+                        "       ldxr    %w1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %w1, %w3\n"
                         "       b.ne    1f\n"
-                        "       stxr    %w0, %w4, [%2]\n"
+                        "       stxr    %w0, %w4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
+                                : "Ir" (old), "r" (new)
                                 : "cc");
                 } while (res);
                 break;
@@ -129,14 +129,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
         case 8:
                 do {
                         asm volatile("// __cmpxchg8\n"
-                        "       ldxr    %1, [%2]\n"
+                        "       ldxr    %1, %2\n"
                         "       mov     %w0, #0\n"
                         "       cmp     %1, %3\n"
                         "       b.ne    1f\n"
-                        "       stxr    %w0, %4, [%2]\n"
+                        "       stxr    %w0, %4, %2\n"
                         "1:\n"
-                                : "=&r" (res), "=&r" (oldval)
-                                : "r" (ptr), "Ir" (old), "r" (new)
+                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
+                                : "Ir" (old), "r" (new)
                                 : "cc");
                 } while (res);
                 break;
-- 
1.7.2.5

Thread overview: 16+ messages
2013-07-19 15:19 [PATCH 0/3] xen: arm: update asm primitives (bitops, spinlocks, atomics) Ian Campbell
2013-07-19 15:20 ` [PATCH 1/3] xen/arm64: Assembly optimized bitops from Linux Ian Campbell
2013-07-19 15:20 ` Ian Campbell [this message]
2013-07-29 16:02   ` [PATCH 2/3] xen/arm64: resync atomics and spinlock asm with Linux Tim Deegan
2013-07-29 16:13     ` Ian Campbell
2013-07-29 18:05       ` Will Deacon
2013-07-30  9:34         ` Ian Campbell
2013-07-30  9:45           ` Will Deacon
2013-07-30  9:55             ` Ian Campbell
2013-07-30  9:59               ` Will Deacon
2013-07-30 10:12                 ` Ian Campbell
2013-07-19 15:20 ` [PATCH 3/3] xen: arm: retry trylock if strex fails on free lock Ian Campbell
2013-07-29 15:52   ` Tim Deegan
2013-07-29 16:20     ` Ian Campbell
2013-07-29 16:25       ` Tim Deegan
2013-08-22 14:56 ` [PATCH 0/3] xen: arm: update asm primitives (bitops, spinlocks, atomics) Ian Campbell
