From: Will Deacon <will@kernel.org>
To: linux-arm-kernel@lists.infradead.org
Cc: mark.rutland@arm.com, peterz@infradead.org,
catalin.marinas@arm.com, ndesaulniers@google.com,
robin.murphy@arm.com, Ard.Biesheuvel@arm.com,
andrew.murray@arm.com, natechancellor@gmail.com,
Will Deacon <will@kernel.org>
Subject: [PATCH v5 07/10] arm64: asm: Kill 'asm/atomic_arch.h'
Date: Thu, 29 Aug 2019 16:48:31 +0100
Message-ID: <20190829154834.26547-8-will@kernel.org>
In-Reply-To: <20190829154834.26547-1-will@kernel.org>
The contents of 'asm/atomic_arch.h' can be split across some of our
other 'asm/' headers: the arch_atomic*() wrappers move into
'asm/atomic.h', the __cmpxchg_case_*() and __cmpxchg_double*() wrappers
move into 'asm/cmpxchg.h' and the LSE/LL-SC selection machinery moves
into 'asm/lse.h'. Remove it.
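For example, with the wrappers in their new home, ATOMIC_OP(atomic_add)
in 'asm/atomic.h' now expands to the equivalent of:

  static inline void arch_atomic_add(int i, atomic_t *v)
  {
          /* static branches: default to LL/SC until LSE is detected */
          if (system_uses_lse_atomics())
                  __lse_atomic_add(i, v);
          else
                  __ll_sc_atomic_add(i, v);
  }

where system_uses_lse_atomics() and the underlying __lse_ll_sc_body()
dispatch macro are provided by 'asm/lse.h', which now pulls in the
'__lse_*' and '__ll_sc_*' implementations itself.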
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/atomic.h      |  77 ++++++++++++++++-
 arch/arm64/include/asm/atomic_arch.h | 155 -----------------------------------
 arch/arm64/include/asm/cmpxchg.h     |  41 ++++++++-
 arch/arm64/include/asm/lse.h         |  24 ++++++
 4 files changed, 140 insertions(+), 157 deletions(-)
delete mode 100644 arch/arm64/include/asm/atomic_arch.h
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c70d3f389d29..7c334337674d 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -17,9 +17,84 @@
#ifdef __KERNEL__
-#include <asm/atomic_arch.h>
#include <asm/cmpxchg.h>
+#define ATOMIC_OP(op) \
+static inline void arch_##op(int i, atomic_t *v) \
+{ \
+ __lse_ll_sc_body(op, i, v); \
+}
+
+ATOMIC_OP(atomic_andnot)
+ATOMIC_OP(atomic_or)
+ATOMIC_OP(atomic_xor)
+ATOMIC_OP(atomic_add)
+ATOMIC_OP(atomic_and)
+ATOMIC_OP(atomic_sub)
+
+
+#define ATOMIC_FETCH_OP(name, op) \
+static inline int arch_##op##name(int i, atomic_t *v) \
+{ \
+ return __lse_ll_sc_body(op##name, i, v); \
+}
+
+#define ATOMIC_FETCH_OPS(op) \
+ ATOMIC_FETCH_OP(_relaxed, op) \
+ ATOMIC_FETCH_OP(_acquire, op) \
+ ATOMIC_FETCH_OP(_release, op) \
+ ATOMIC_FETCH_OP( , op)
+
+ATOMIC_FETCH_OPS(atomic_fetch_andnot)
+ATOMIC_FETCH_OPS(atomic_fetch_or)
+ATOMIC_FETCH_OPS(atomic_fetch_xor)
+ATOMIC_FETCH_OPS(atomic_fetch_add)
+ATOMIC_FETCH_OPS(atomic_fetch_and)
+ATOMIC_FETCH_OPS(atomic_fetch_sub)
+ATOMIC_FETCH_OPS(atomic_add_return)
+ATOMIC_FETCH_OPS(atomic_sub_return)
+
+
+#define ATOMIC64_OP(op) \
+static inline void arch_##op(long i, atomic64_t *v) \
+{ \
+ __lse_ll_sc_body(op, i, v); \
+}
+
+ATOMIC64_OP(atomic64_andnot)
+ATOMIC64_OP(atomic64_or)
+ATOMIC64_OP(atomic64_xor)
+ATOMIC64_OP(atomic64_add)
+ATOMIC64_OP(atomic64_and)
+ATOMIC64_OP(atomic64_sub)
+
+
+#define ATOMIC64_FETCH_OP(name, op) \
+static inline long arch_##op##name(long i, atomic64_t *v) \
+{ \
+ return __lse_ll_sc_body(op##name, i, v); \
+}
+
+#define ATOMIC64_FETCH_OPS(op) \
+ ATOMIC64_FETCH_OP(_relaxed, op) \
+ ATOMIC64_FETCH_OP(_acquire, op) \
+ ATOMIC64_FETCH_OP(_release, op) \
+ ATOMIC64_FETCH_OP( , op)
+
+ATOMIC64_FETCH_OPS(atomic64_fetch_andnot)
+ATOMIC64_FETCH_OPS(atomic64_fetch_or)
+ATOMIC64_FETCH_OPS(atomic64_fetch_xor)
+ATOMIC64_FETCH_OPS(atomic64_fetch_add)
+ATOMIC64_FETCH_OPS(atomic64_fetch_and)
+ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
+ATOMIC64_FETCH_OPS(atomic64_add_return)
+ATOMIC64_FETCH_OPS(atomic64_sub_return)
+
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ return __lse_ll_sc_body(atomic64_dec_if_positive, v);
+}
+
#define ATOMIC_INIT(i) { (i) }
#define arch_atomic_read(v) READ_ONCE((v)->counter)
diff --git a/arch/arm64/include/asm/atomic_arch.h b/arch/arm64/include/asm/atomic_arch.h
deleted file mode 100644
index 1aac7fc65084..000000000000
--- a/arch/arm64/include/asm/atomic_arch.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Selection between LSE and LL/SC atomics.
- *
- * Copyright (C) 2018 ARM Ltd.
- * Author: Andrew Murray <andrew.murray@arm.com>
- */
-
-#ifndef __ASM_ATOMIC_ARCH_H
-#define __ASM_ATOMIC_ARCH_H
-
-
-#include <linux/jump_label.h>
-
-#include <asm/cpucaps.h>
-#include <asm/atomic_ll_sc.h>
-#include <asm/atomic_lse.h>
-
-extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
-extern struct static_key_false arm64_const_caps_ready;
-
-static inline bool system_uses_lse_atomics(void)
-{
- return (IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) &&
- IS_ENABLED(CONFIG_AS_LSE) &&
- static_branch_likely(&arm64_const_caps_ready)) &&
- static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
-}
-
-#define __lse_ll_sc_body(op, ...) \
-({ \
- system_uses_lse_atomics() ? \
- __lse_##op(__VA_ARGS__) : \
- __ll_sc_##op(__VA_ARGS__); \
-})
-
-#define ATOMIC_OP(op) \
-static inline void arch_##op(int i, atomic_t *v) \
-{ \
- __lse_ll_sc_body(op, i, v); \
-}
-
-ATOMIC_OP(atomic_andnot)
-ATOMIC_OP(atomic_or)
-ATOMIC_OP(atomic_xor)
-ATOMIC_OP(atomic_add)
-ATOMIC_OP(atomic_and)
-ATOMIC_OP(atomic_sub)
-
-
-#define ATOMIC_FETCH_OP(name, op) \
-static inline int arch_##op##name(int i, atomic_t *v) \
-{ \
- return __lse_ll_sc_body(op##name, i, v); \
-}
-
-#define ATOMIC_FETCH_OPS(op) \
- ATOMIC_FETCH_OP(_relaxed, op) \
- ATOMIC_FETCH_OP(_acquire, op) \
- ATOMIC_FETCH_OP(_release, op) \
- ATOMIC_FETCH_OP( , op)
-
-ATOMIC_FETCH_OPS(atomic_fetch_andnot)
-ATOMIC_FETCH_OPS(atomic_fetch_or)
-ATOMIC_FETCH_OPS(atomic_fetch_xor)
-ATOMIC_FETCH_OPS(atomic_fetch_add)
-ATOMIC_FETCH_OPS(atomic_fetch_and)
-ATOMIC_FETCH_OPS(atomic_fetch_sub)
-ATOMIC_FETCH_OPS(atomic_add_return)
-ATOMIC_FETCH_OPS(atomic_sub_return)
-
-
-#define ATOMIC64_OP(op) \
-static inline void arch_##op(long i, atomic64_t *v) \
-{ \
- __lse_ll_sc_body(op, i, v); \
-}
-
-ATOMIC64_OP(atomic64_andnot)
-ATOMIC64_OP(atomic64_or)
-ATOMIC64_OP(atomic64_xor)
-ATOMIC64_OP(atomic64_add)
-ATOMIC64_OP(atomic64_and)
-ATOMIC64_OP(atomic64_sub)
-
-
-#define ATOMIC64_FETCH_OP(name, op) \
-static inline long arch_##op##name(long i, atomic64_t *v) \
-{ \
- return __lse_ll_sc_body(op##name, i, v); \
-}
-
-#define ATOMIC64_FETCH_OPS(op) \
- ATOMIC64_FETCH_OP(_relaxed, op) \
- ATOMIC64_FETCH_OP(_acquire, op) \
- ATOMIC64_FETCH_OP(_release, op) \
- ATOMIC64_FETCH_OP( , op)
-
-ATOMIC64_FETCH_OPS(atomic64_fetch_andnot)
-ATOMIC64_FETCH_OPS(atomic64_fetch_or)
-ATOMIC64_FETCH_OPS(atomic64_fetch_xor)
-ATOMIC64_FETCH_OPS(atomic64_fetch_add)
-ATOMIC64_FETCH_OPS(atomic64_fetch_and)
-ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
-ATOMIC64_FETCH_OPS(atomic64_add_return)
-ATOMIC64_FETCH_OPS(atomic64_sub_return)
-
-
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
-{
- return __lse_ll_sc_body(atomic64_dec_if_positive, v);
-}
-
-#define __CMPXCHG_CASE(name, sz) \
-static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
- u##sz old, \
- u##sz new) \
-{ \
- return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \
- ptr, old, new); \
-}
-
-__CMPXCHG_CASE( , 8)
-__CMPXCHG_CASE( , 16)
-__CMPXCHG_CASE( , 32)
-__CMPXCHG_CASE( , 64)
-__CMPXCHG_CASE(acq_, 8)
-__CMPXCHG_CASE(acq_, 16)
-__CMPXCHG_CASE(acq_, 32)
-__CMPXCHG_CASE(acq_, 64)
-__CMPXCHG_CASE(rel_, 8)
-__CMPXCHG_CASE(rel_, 16)
-__CMPXCHG_CASE(rel_, 32)
-__CMPXCHG_CASE(rel_, 64)
-__CMPXCHG_CASE(mb_, 8)
-__CMPXCHG_CASE(mb_, 16)
-__CMPXCHG_CASE(mb_, 32)
-__CMPXCHG_CASE(mb_, 64)
-
-
-#define __CMPXCHG_DBL(name) \
-static inline long __cmpxchg_double##name(unsigned long old1, \
- unsigned long old2, \
- unsigned long new1, \
- unsigned long new2, \
- volatile void *ptr) \
-{ \
- return __lse_ll_sc_body(_cmpxchg_double##name, \
- old1, old2, new1, new2, ptr); \
-}
-
-__CMPXCHG_DBL( )
-__CMPXCHG_DBL(_mb)
-
-#endif /* __ASM_ATOMIC_LSE_H */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index e5fff8cd4904..afaba73e0b2c 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -10,7 +10,6 @@
#include <linux/build_bug.h>
#include <linux/compiler.h>
-#include <asm/atomic_arch.h>
#include <asm/barrier.h>
#include <asm/lse.h>
@@ -104,6 +103,46 @@ __XCHG_GEN(_mb)
#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+#define __CMPXCHG_CASE(name, sz) \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+ u##sz old, \
+ u##sz new) \
+{ \
+ return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \
+ ptr, old, new); \
+}
+
+__CMPXCHG_CASE( , 8)
+__CMPXCHG_CASE( , 16)
+__CMPXCHG_CASE( , 32)
+__CMPXCHG_CASE( , 64)
+__CMPXCHG_CASE(acq_, 8)
+__CMPXCHG_CASE(acq_, 16)
+__CMPXCHG_CASE(acq_, 32)
+__CMPXCHG_CASE(acq_, 64)
+__CMPXCHG_CASE(rel_, 8)
+__CMPXCHG_CASE(rel_, 16)
+__CMPXCHG_CASE(rel_, 32)
+__CMPXCHG_CASE(rel_, 64)
+__CMPXCHG_CASE(mb_, 8)
+__CMPXCHG_CASE(mb_, 16)
+__CMPXCHG_CASE(mb_, 32)
+__CMPXCHG_CASE(mb_, 64)
+
+#define __CMPXCHG_DBL(name) \
+static inline long __cmpxchg_double##name(unsigned long old1, \
+ unsigned long old2, \
+ unsigned long new1, \
+ unsigned long new2, \
+ volatile void *ptr) \
+{ \
+ return __lse_ll_sc_body(_cmpxchg_double##name, \
+ old1, old2, new1, new2, ptr); \
+}
+
+__CMPXCHG_DBL( )
+__CMPXCHG_DBL(_mb)
+
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
unsigned long old, \
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 08e818e53ed7..80b388278149 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -2,22 +2,46 @@
#ifndef __ASM_LSE_H
#define __ASM_LSE_H
+#include <asm/atomic_ll_sc.h>
+
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
#include <linux/compiler_types.h>
#include <linux/export.h>
+#include <linux/jump_label.h>
#include <linux/stringify.h>
#include <asm/alternative.h>
+#include <asm/atomic_lse.h>
#include <asm/cpucaps.h>
__asm__(".arch_extension lse");
+extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
+
+static inline bool system_uses_lse_atomics(void)
+{
+ return (static_branch_likely(&arm64_const_caps_ready)) &&
+ static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
+}
+
+#define __lse_ll_sc_body(op, ...) \
+({ \
+ system_uses_lse_atomics() ? \
+ __lse_##op(__VA_ARGS__) : \
+ __ll_sc_##op(__VA_ARGS__); \
+})
+
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+static inline bool system_uses_lse_atomics(void) { return false; }
+
+#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__)
+
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
--
2.11.0