public inbox for linux-doc@vger.kernel.org
 help / color / mirror / Atom feed
From: Mark Rutland <mark.rutland@arm.com>
To: linux-kernel@vger.kernel.org
Cc: akiyks@gmail.com, boqun.feng@gmail.com, corbet@lwn.net,
	keescook@chromium.org, linux-arch@vger.kernel.org,
	linux@armlinux.org.uk, linux-doc@vger.kernel.org,
	mark.rutland@arm.com, paulmck@kernel.org, peterz@infradead.org,
	sstabellini@kernel.org, will@kernel.org
Subject: [PATCH 01/26] locking/atomic: arm: fix sync ops
Date: Mon, 22 May 2023 13:24:04 +0100	[thread overview]
Message-ID: <20230522122429.1915021-2-mark.rutland@arm.com> (raw)
In-Reply-To: <20230522122429.1915021-1-mark.rutland@arm.com>

The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.

Fix this by defining sync ops with the required barriers.

Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.

Fixes: e54d2f61528165bb ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm/include/asm/assembler.h   | 17 +++++++++++++++++
 arch/arm/include/asm/sync_bitops.h | 29 +++++++++++++++++++++++++----
 arch/arm/lib/bitops.h              | 14 +++++++++++---
 arch/arm/lib/testchangebit.S       |  4 ++++
 arch/arm/lib/testclearbit.S        |  4 ++++
 arch/arm/lib/testsetbit.S          |  4 ++++
 6 files changed, 65 insertions(+), 7 deletions(-)

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 505a306e0271a..aebe2c8f6a686 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
 #endif
 	.endm
 
+/*
+ * Raw SMP data memory barrier
+ */
+	.macro	__smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+	.ifeqs "\mode","arm"
+	dmb	ish
+	.else
+	W(dmb)	ish
+	.endif
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#else
+	.error "Incompatible SMP platform"
+#endif
+	.endm
+
 #if defined(CONFIG_CPU_V7M)
 	/*
 	 * setmode is used to assert to be in svc mode during boot. For v7-M
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 6f5d627c44a3c..f46b3c570f92e 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -14,14 +14,35 @@
  * ops which are SMP safe even on a UP kernel.
  */
 
+/*
+ * Unordered
+ */
+
 #define sync_set_bit(nr, p)		_set_bit(nr, p)
 #define sync_clear_bit(nr, p)		_clear_bit(nr, p)
 #define sync_change_bit(nr, p)		_change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p)	_test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p)	_test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p)	_test_and_change_bit(nr, p)
 #define sync_test_bit(nr, addr)		test_bit(nr, addr)
-#define arch_sync_cmpxchg		arch_cmpxchg
 
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new)				\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__smp_mb__before_atomic();					\
+	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
+	__smp_mb__after_atomic();					\
+	__ret;								\
+})
 
 #endif
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 95bd359912889..f069d1b2318e6 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -28,7 +28,7 @@ UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
 
-	.macro	testop, name, instr, store
+	.macro	__testop, name, instr, store, barrier
 ENTRY(	\name		)
 UNWIND(	.fnstart	)
 	ands	ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND(	.fnstart	)
 	mov	r0, r0, lsr #5
 	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
-	smp_dmb
+	\barrier
 #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 	.arch_extension	mp
 	ALT_SMP(W(pldw)	[r1])
@@ -50,13 +50,21 @@ UNWIND(	.fnstart	)
 	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
-	smp_dmb
+	\barrier
 	cmp	r0, #0
 	movne	r0, #1
 2:	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
+
+	.macro	testop, name, instr, store
+	__testop \name, \instr, \store, smp_dmb
+	.endm
+
+	.macro	sync_testop, name, instr, store
+	__testop \name, \instr, \store, __smp_dmb
+	.endm
 #else
 	.macro	bitop, name, instr
 ENTRY(	\name		)
diff --git a/arch/arm/lib/testchangebit.S b/arch/arm/lib/testchangebit.S
index 4ebecc67e6e04..f13fe9bc2399a 100644
--- a/arch/arm/lib/testchangebit.S
+++ b/arch/arm/lib/testchangebit.S
@@ -10,3 +10,7 @@
                 .text
 
 testop	_test_and_change_bit, eor, str
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_change_bit, eor, str
+#endif
diff --git a/arch/arm/lib/testclearbit.S b/arch/arm/lib/testclearbit.S
index 009afa0f5b4a7..4d2c5ca620ebf 100644
--- a/arch/arm/lib/testclearbit.S
+++ b/arch/arm/lib/testclearbit.S
@@ -10,3 +10,7 @@
                 .text
 
 testop	_test_and_clear_bit, bicne, strne
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_clear_bit, bicne, strne
+#endif
diff --git a/arch/arm/lib/testsetbit.S b/arch/arm/lib/testsetbit.S
index f3192e55acc87..649dbab65d8d0 100644
--- a/arch/arm/lib/testsetbit.S
+++ b/arch/arm/lib/testsetbit.S
@@ -10,3 +10,7 @@
                 .text
 
 testop	_test_and_set_bit, orreq, streq
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop	_sync_test_and_set_bit, orreq, streq
+#endif
-- 
2.30.2


  reply	other threads:[~2023-05-22 12:27 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-22 12:24 [PATCH 00/26] locking/atomic: restructuring + kerneldoc Mark Rutland
2023-05-22 12:24 ` Mark Rutland [this message]
2023-05-22 12:24 ` [PATCH 02/26] locking/atomic: remove fallback comments Mark Rutland
2023-05-22 12:24 ` [PATCH 03/26] locking/atomic: hexagon: remove redundant arch_atomic_cmpxchg Mark Rutland
2023-05-22 12:24 ` [PATCH 04/26] locking/atomic: make atomic*_{cmp,}xchg optional Mark Rutland
2023-05-22 12:24 ` [PATCH 05/26] locking/atomic: arc: add preprocessor symbols Mark Rutland
2023-05-22 12:24 ` [PATCH 06/26] locking/atomic: arm: " Mark Rutland
2023-05-22 12:24 ` [PATCH 07/26] locking/atomic: hexagon: " Mark Rutland
2023-05-22 12:24 ` [PATCH 08/26] locking/atomic: m68k: " Mark Rutland
2023-05-22 12:24 ` [PATCH 09/26] locking/atomic: parisc: " Mark Rutland
2023-05-22 12:24 ` [PATCH 10/26] locking/atomic: sh: " Mark Rutland
2023-05-22 12:24 ` [PATCH 11/26] locking/atomic: sparc: " Mark Rutland
2023-05-22 12:24 ` [PATCH 12/26] locking/atomic: x86: " Mark Rutland
2023-05-22 12:24 ` [PATCH 13/26] locking/atomic: xtensa: " Mark Rutland
2023-05-22 12:24 ` [PATCH 14/26] locking/atomic: scripts: remove bogus order parameter Mark Rutland
2023-05-22 12:24 ` [PATCH 15/26] locking/atomic: scripts: remove leftover "${mult}" Mark Rutland
2023-05-22 12:24 ` [PATCH 16/26] locking/atomic: scripts: factor out order template generation Mark Rutland
2023-05-22 12:24 ` [PATCH 18/26] locking/atomic: treewide: use raw_atomic*_<op>() Mark Rutland
2023-05-22 12:24 ` [PATCH 19/26] locking/atomic: scripts: build raw_atomic_long*() directly Mark Rutland
2023-05-22 12:24 ` [PATCH 21/26] locking/atomic: scripts: split pfx/name/sfx/order Mark Rutland
2023-05-22 12:24 ` [PATCH 22/26] locking/atomic: scripts: simplify raw_atomic_long*() definitions Mark Rutland
2023-05-22 12:24 ` [PATCH 25/26] locking/atomic: docs: Add atomic operations to the driver basic API documentation Mark Rutland
2023-05-24 14:10   ` Akira Yokosawa
2023-05-30 12:33     ` Mark Rutland
2023-05-22 12:24 ` [PATCH 26/26] locking/atomic: treewide: delete arch_atomic_*() kerneldoc Mark Rutland
2023-05-22 20:58 ` [PATCH 00/26] locking/atomic: restructuring + kerneldoc Kees Cook
     [not found] ` <20230522122429.1915021-25-mark.rutland@arm.com>
2023-05-24 14:03   ` [PATCH 24/26] locking/atomic: scripts: generate kerneldoc comments Akira Yokosawa
2023-05-24 14:11     ` Peter Zijlstra
2023-05-26  3:17       ` Akira Yokosawa
2023-05-26  4:51         ` Randy Dunlap
2023-05-26 10:27           ` Akira Yokosawa
2023-05-26 15:24             ` Randy Dunlap
2023-05-30 12:42             ` Mark Rutland
2023-05-31 23:41               ` Akira Yokosawa
2023-06-01 10:22                 ` Mark Rutland
2023-05-24 14:17   ` Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230522122429.1915021-2-mark.rutland@arm.com \
    --to=mark.rutland@arm.com \
    --cc=akiyks@gmail.com \
    --cc=boqun.feng@gmail.com \
    --cc=corbet@lwn.net \
    --cc=keescook@chromium.org \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux@armlinux.org.uk \
    --cc=paulmck@kernel.org \
    --cc=peterz@infradead.org \
    --cc=sstabellini@kernel.org \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox