From: Peter Zijlstra <peterz@infradead.org>
To: torvalds@linux-foundation.org, mingo@kernel.org,
	tglx@linutronix.de, will.deacon@arm.com,
	paulmck@linux.vnet.ibm.com, boqun.feng@gmail.com,
	waiman.long@hpe.com, fweisbec@gmail.com
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	rth@twiddle.net, vgupta@synopsys.com, linux@arm.linux.org.uk,
	egtvedt@samfundet.no, realmz6@gmail.com,
	ysato@users.sourceforge.jp, rkuo@codeaurora.org,
	tony.luck@intel.com, geert@linux-m68k.org,
	james.hogan@imgtec.com, ralf@linux-mips.org, dhowells@redhat.com,
	jejb@parisc-linux.org, mpe@ellerman.id.au,
	schwidefsky@de.ibm.com, dalias@libc.org, davem@davemloft.net,
	cmetcalf@mellanox.com, jcmvbkbc@gmail.com, arnd@arndb.de,
	peterz@infradead.org, dbueso@suse.de, fengguang.wu@intel.com
Subject: [RFC][PATCH 15/31] locking,mips: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
Date: Fri, 22 Apr 2016 11:04:28 +0200
Message-ID: <20160422093924.017695144@infradead.org>
In-Reply-To: 20160422090413.393652501@infradead.org

[-- Attachment #1: peterz-atomic-fetch-mips.patch --]
[-- Type: text/plain, Size: 5948 bytes --]

Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.
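
A minimal sketch of the difference (illustrative only, not part of the
patch):

	int old, new;

	old = atomic_fetch_add(i, v);	/* value of v->counter before the add */
	new = atomic_add_return(i, v);	/* value of v->counter after the add  */

For a reversible operation such as add the old value can still be
derived (old == new - i); the fetch variants simply make it available
directly.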

This is especially useful for irreversible operations -- such as
bitops -- where the value prior to modification cannot be reconstructed
from the result of the operation.
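
For instance (a sketch only, not part of the patch; the helper name is
made up, and the (value, pointer) argument order follows this series):

	/* hypothetical example: atomically set a bit and learn whether it
	 * was already set -- the value after the OR cannot tell us that,
	 * the value before it (returned by atomic_fetch_or) can. */
	static inline int flag_test_and_set(atomic_t *v, int mask)
	{
		return (atomic_fetch_or(mask, v) & mask) != 0;
	}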

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/mips/include/asm/atomic.h |  137 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 128 insertions(+), 9 deletions(-)

--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i
 			"	" #asm_op " %0, %2			\n"   \
 			"	sc	%0, %1				\n"   \
 			"	.set	mips0				\n"   \
-			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
 			: "Ir" (i));					      \
 		} while (unlikely(!temp));				      \
 	} else {							      \
@@ -130,18 +130,78 @@ static __inline__ int atomic_##op##_retu
 	return result;							      \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
+static __inline__ int atomic_fetch_##op(int i, atomic_t * v)		      \
+{									      \
+	int result;							      \
+									      \
+	smp_mb__before_llsc();						      \
+									      \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+		int temp;						      \
+									      \
+		__asm__ __volatile__(					      \
+		"	.set	arch=r4000				\n"   \
+		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
+		"	" #asm_op " %0, %1, %3				\n"   \
+		"	sc	%0, %2					\n"   \
+		"	beqzl	%0, 1b					\n"   \
+		"	move	%0, %1					\n"   \
+		"	.set	mips0					\n"   \
+		: "=&r" (result), "=&r" (temp),				      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
+		: "Ir" (i));						      \
+	} else if (kernel_uses_llsc) {					      \
+		int temp;						      \
+									      \
+		do {							      \
+			__asm__ __volatile__(				      \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
+			"	ll	%1, %2	# atomic_fetch_" #op "	\n"   \
+			"	" #asm_op " %0, %1, %3			\n"   \
+			"	sc	%0, %2				\n"   \
+			"	.set	mips0				\n"   \
+			: "=&r" (result), "=&r" (temp),			      \
+			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
+			: "Ir" (i));					      \
+		} while (unlikely(!result));				      \
+									      \
+		result = temp;						      \
+	} else {							      \
+		unsigned long flags;					      \
+									      \
+		raw_local_irq_save(flags);				      \
+		result = v->counter;					      \
+		v->counter c_op i;					      \
+		raw_local_irq_restore(flags);				      \
+	}								      \
+									      \
+	smp_llsc_mb();							      \
+									      \
+	return result;							      \
+}
+
 #define ATOMIC_OPS(op, c_op, asm_op)					      \
 	ATOMIC_OP(op, c_op, asm_op)					      \
-	ATOMIC_OP_RETURN(op, c_op, asm_op)
+	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)					      \
+	ATOMIC_OP(op, c_op, asm_op)					      \
+	ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
@@ -414,17 +474,76 @@ static __inline__ long atomic64_##op##_r
 	return result;							      \
 }
 
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
+static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v)	      \
+{									      \
+	long result;							      \
+									      \
+	smp_mb__before_llsc();						      \
+									      \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+		long temp;						      \
+									      \
+		__asm__ __volatile__(					      \
+		"	.set	arch=r4000				\n"   \
+		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
+		"	" #asm_op " %0, %1, %3				\n"   \
+		"	scd	%0, %2					\n"   \
+		"	beqzl	%0, 1b					\n"   \
+		"	move	%0, %1					\n"   \
+		"	.set	mips0					\n"   \
+		: "=&r" (result), "=&r" (temp),				      \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
+		: "Ir" (i));						      \
+	} else if (kernel_uses_llsc) {					      \
+		long temp;						      \
+									      \
+		do {							      \
+			__asm__ __volatile__(				      \
+			"	.set	"MIPS_ISA_LEVEL"		\n"   \
+			"	lld	%1, %2	# atomic64_fetch_" #op "\n"   \
+			"	" #asm_op " %0, %1, %3			\n"   \
+			"	scd	%0, %2				\n"   \
+			"	.set	mips0				\n"   \
+			: "=&r" (result), "=&r" (temp),			      \
+			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
+			: "Ir" (i));					      \
+		} while (unlikely(!result));				      \
+									      \
+		result = temp;						      \
+	} else {							      \
+		unsigned long flags;					      \
+									      \
+		raw_local_irq_save(flags);				      \
+		result = v->counter;					      \
+		v->counter c_op i;					      \
+		raw_local_irq_restore(flags);				      \
+	}								      \
+									      \
+	smp_llsc_mb();							      \
+									      \
+	return result;							      \
+}
+
 #define ATOMIC64_OPS(op, c_op, asm_op)					      \
 	ATOMIC64_OP(op, c_op, asm_op)					      \
-	ATOMIC64_OP_RETURN(op, c_op, asm_op)
+	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
+	ATOMIC64_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
 
 #undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op)					      \
+	ATOMIC64_OP(op, c_op, asm_op)					      \
+	ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 

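For reference (an illustration, not part of the patch), this is roughly
what ATOMIC_FETCH_OP(add, +=, addu) expands to for a kernel_uses_llsc
configuration, with the R10000 workaround and the IRQ-disable fallback
branches omitted:

	static __inline__ int atomic_fetch_add(int i, atomic_t * v)
	{
		int result;

		smp_mb__before_llsc();		/* order prior accesses before the LL/SC */

		if (kernel_uses_llsc) {
			int temp;

			do {
				__asm__ __volatile__(
				"	.set	"MIPS_ISA_LEVEL"		\n"
				"	ll	%1, %2	# atomic_fetch_add	\n"  /* temp = old value   */
				"	addu %0, %1, %3			\n"  /* result = old + i   */
				"	sc	%0, %2				\n"  /* result = SC status */
				"	.set	mips0				\n"
				: "=&r" (result), "=&r" (temp),
				  "+" GCC_OFF_SMALL_ASM() (v->counter)
				: "Ir" (i));
			} while (unlikely(!result));	/* retry if the store-conditional failed */

			result = temp;			/* hand back the pre-add value */
		}

		smp_llsc_mb();			/* order the LL/SC before later accesses */

		return result;
	}

LL loads the current value into temp, the ALU op computes the new value
into result, and SC both attempts the store and reuses result as its
success flag; once the store succeeds, the value originally loaded by LL
is returned, i.e. the value _before_ modification.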

Thread overview: 106+ messages
2016-04-22  9:04 [RFC][PATCH 00/31] implement atomic_fetch_$op Peter Zijlstra
2016-04-22  9:04 ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 01/31] locking: Flip arguments to atomic_fetch_or Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 10:54   ` Will Deacon
2016-04-22 11:09   ` Geert Uytterhoeven
2016-04-22 11:09     ` Geert Uytterhoeven
2016-04-22 14:18     ` Peter Zijlstra
2016-04-22 14:18       ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 02/31] locking,alpha: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 16:57   ` Richard Henderson
2016-04-23  1:55     ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 03/31] locking,arc: Implement atomic_fetch_{add,sub,and,andnot,or,xor}() Peter Zijlstra
2016-04-22 10:50   ` Vineet Gupta
2016-04-22 10:50     ` Vineet Gupta
2016-04-22 14:16     ` Peter Zijlstra
2016-04-22 14:16       ` Peter Zijlstra
2016-04-25  4:26       ` Vineet Gupta
2016-04-25  4:26         ` Vineet Gupta
2016-04-22 14:26     ` Peter Zijlstra
2016-04-22 14:26       ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 04/31] locking,arm: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 11:35   ` Will Deacon
2016-04-22 11:35     ` Will Deacon
2016-04-22  9:04 ` [RFC][PATCH 05/31] locking,arm64: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 11:08   ` Will Deacon
2016-04-22 14:23   ` Will Deacon
2016-04-22 14:23     ` Will Deacon
2016-04-22  9:04 ` [RFC][PATCH 06/31] locking,avr32: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 11:58   ` Hans-Christian Noren Egtvedt
2016-04-22 11:58     ` Hans-Christian Noren Egtvedt
2016-04-22  9:04 ` [RFC][PATCH 07/31] locking,blackfin: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 08/31] locking,frv: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 09/31] locking,h8300: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 10/31] locking,hexagon: " Peter Zijlstra
2016-04-23  2:16   ` Peter Zijlstra
2016-04-26  0:39     ` Richard Kuo
2016-04-22  9:04 ` [RFC][PATCH 11/31] locking,ia64: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 12/31] locking,m32r: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 13/31] locking,m68k: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 14/31] locking,metag: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-30  0:20   ` James Hogan
2016-05-02  8:15     ` Peter Zijlstra
2016-04-22  9:04 ` Peter Zijlstra [this message]
2016-04-22  9:04   ` [RFC][PATCH 15/31] locking,mips: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 16/31] locking,mn10300: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 17/31] locking,parisc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 18/31] locking,powerpc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}{,_relaxed,_acquire,_release}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 16:41   ` Boqun Feng
2016-04-23  2:31     ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 19/31] locking,s390: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-25  8:06   ` Martin Schwidefsky
2016-04-25  8:06     ` Martin Schwidefsky
2016-04-25  8:26     ` Peter Zijlstra
2016-04-25  8:26       ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 20/31] locking,sh: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 21/31] locking,sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 22/31] locking,tile: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-25 21:10   ` Chris Metcalf
     [not found]   ` <571E840A.8090703@mellanox.com>
2016-04-26 15:28     ` Peter Zijlstra
2016-04-26 15:32       ` Chris Metcalf
2016-04-22  9:04 ` [RFC][PATCH 23/31] locking,x86: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 24/31] locking,xtensa: Implement atomic_fetch_{add,sub,and,or,xor}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 25/31] locking: Fix atomic64_relaxed bits Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 26/31] locking: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 27/31] locking: Remove linux/atomic.h:atomic_fetch_or Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22 13:02   ` Will Deacon
2016-04-22 14:21     ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 28/31] locking: Remove the deprecated atomic_{set,clear}_mask() functions Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 29/31] locking,alpha: Convert to _relaxed atomics Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 30/31] locking,mips: " Peter Zijlstra
2016-04-22  9:04   ` Peter Zijlstra
2016-04-22  9:04 ` [RFC][PATCH 31/31] locking,qrwlock: Employ atomic_fetch_add_acquire() Peter Zijlstra
2016-04-22 14:25   ` Waiman Long
2016-04-22  9:44 ` [RFC][PATCH 00/31] implement atomic_fetch_$op Peter Zijlstra
2016-04-22  9:44   ` Peter Zijlstra
2016-04-22 12:56   ` Fengguang Wu
2016-04-22 13:03     ` Will Deacon
2016-04-22 13:03       ` Will Deacon
2016-04-22 14:23     ` Peter Zijlstra
2016-04-23  1:59       ` Fengguang Wu
2016-04-22 18:35     ` Kalle Valo
2016-04-23  3:23       ` Fengguang Wu
