From: Mark Rutland <mark.rutland@arm.com>
To: linux-arm-kernel@lists.infradead.org
Cc: ardb@kernel.org, bertrand.marquis@arm.com,
boris.ostrovsky@oracle.com, broonie@kernel.org,
catalin.marinas@arm.com, daniel.lezcano@linaro.org,
james.morse@arm.com, jgross@suse.com, mark.rutland@arm.com,
maz@kernel.org, oliver.upton@linux.dev, pcc@google.com,
sstabellini@kernel.org, suzuki.poulose@arm.com,
tglx@linutronix.de, vladimir.murzin@arm.com, will@kernel.org
Subject: [PATCH 04/37] arm64: Add cpucap_is_possible()
Date: Tue, 19 Sep 2023 10:28:17 +0100
Message-ID: <20230919092850.1940729-5-mark.rutland@arm.com>
In-Reply-To: <20230919092850.1940729-1-mark.rutland@arm.com>

Many cpucaps can only be set when certain CONFIG_* options are selected,
and we need to check the CONFIG_* option before the cap in order to
avoid generating redundant code. Due to this, we have a growing number
of helpers in <asm/cpufeature.h> of the form:

| static __always_inline bool system_supports_foo(void)
| {
|	return IS_ENABLED(CONFIG_ARM64_FOO) &&
|		cpus_have_const_cap(ARM64_HAS_FOO);
| }

This is unfortunate as it forces us to use cpus_have_const_cap()
unnecessarily, resulting in redundant code being generated by the
compiler. In the vast majority of cases, we only require that feature
checks indicate the presence of a feature after cpucaps have been
finalized, and so it would be sufficient to use alternative_has_cap_*().
However some code needs to handle a feature before alternatives have
been patched, and must test the system_cpucaps bitmap via
cpus_have_const_cap(). In other cases we'd like to check for
unintentional usage of a cpucap before alternatives are patched, and so
it would be preferable to use cpus_have_final_cap().
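
As a rough illustrative sketch (not part of this patch; ARM64_HAS_FOO and
the callers below are hypothetical, and <asm/cpufeature.h> is assumed to
be included), the intended split between the primitives looks like:

| static bool foo_enabled __ro_after_init;
|
| /* May run before alternatives are patched: test the bitmap directly. */
| static void __init foo_early_init(void)
| {
|	foo_enabled = cpus_have_cap(ARM64_HAS_FOO);
| }
|
| /* Only runs after cpucaps are finalized: use the patched check. */
| static bool foo_fast_path_supported(void)
| {
|	return cpus_have_final_cap(ARM64_HAS_FOO);
| }
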
Placing the IS_ENABLED() checks in each callsite is tedious and
error-prone, and the same applies for writing wrappers for each
combination of cpucap and alternative_has_cap_*() / cpus_have_cap() /
cpus_have_final_cap(). It would be nicer if we could centralize the
knowledge of which cpucaps are possible, and have
alternative_has_cap_*(), cpus_have_cap(), and cpus_have_final_cap()
handle this automatically.

This patch adds a new cpucap_is_possible() function which will be
responsible for checking the CONFIG_* option, and updates the low-level
cpucap checks to use this. The existing CONFIG_* checks in
<asm/cpufeature.h> are moved over to cpucap_is_possible(), but the (now
trivial) wrapper functions are retained for now.
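
As an illustrative sketch only (again using the hypothetical ARM64_HAS_FOO
cap from above), a wrapper written on top of this no longer needs an
open-coded IS_ENABLED() check, since that knowledge lives in
cpucap_is_possible():

| static __always_inline bool system_supports_foo(void)
| {
|	return cpus_have_const_cap(ARM64_HAS_FOO);
| }

... with a corresponding case added to the switch in cpucap_is_possible():

| case ARM64_HAS_FOO:
|	return IS_ENABLED(CONFIG_ARM64_FOO);
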
There should be no functional change as a result of this patch alone.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
---
arch/arm64/include/asm/alternative-macros.h | 8 ++--
arch/arm64/include/asm/cpucaps.h | 41 +++++++++++++++++++++
arch/arm64/include/asm/cpufeature.h | 39 +++++++-------------
3 files changed, 59 insertions(+), 29 deletions(-)
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 94b486192e1f1..210bb43cff2c7 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -226,8 +226,8 @@ alternative_endif
static __always_inline bool
alternative_has_cap_likely(const unsigned long cpucap)
{
- compiletime_assert(cpucap < ARM64_NCAPS,
- "cpucap must be < ARM64_NCAPS");
+ if (!cpucap_is_possible(cpucap))
+ return false;
asm_volatile_goto(
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
@@ -244,8 +244,8 @@ alternative_has_cap_likely(const unsigned long cpucap)
static __always_inline bool
alternative_has_cap_unlikely(const unsigned long cpucap)
{
- compiletime_assert(cpucap < ARM64_NCAPS,
- "cpucap must be < ARM64_NCAPS");
+ if (!cpucap_is_possible(cpucap))
+ return false;
asm_volatile_goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 7333b5bbf4488..764ad4eef8591 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -5,4 +5,45 @@
#include <asm/cpucap-defs.h>
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+/*
+ * Check whether a cpucap is possible at compiletime.
+ */
+static __always_inline bool
+cpucap_is_possible(const unsigned int cap)
+{
+ compiletime_assert(__builtin_constant_p(cap),
+ "cap must be a constant");
+ compiletime_assert(cap < ARM64_NCAPS,
+ "cap must be < ARM64_NCAPS");
+
+ switch (cap) {
+ case ARM64_HAS_PAN:
+ return IS_ENABLED(CONFIG_ARM64_PAN);
+ case ARM64_SVE:
+ return IS_ENABLED(CONFIG_ARM64_SVE);
+ case ARM64_SME:
+ case ARM64_SME2:
+ case ARM64_SME_FA64:
+ return IS_ENABLED(CONFIG_ARM64_SME);
+ case ARM64_HAS_CNP:
+ return IS_ENABLED(CONFIG_ARM64_CNP);
+ case ARM64_HAS_ADDRESS_AUTH:
+ case ARM64_HAS_GENERIC_AUTH:
+ return IS_ENABLED(CONFIG_ARM64_PTR_AUTH);
+ case ARM64_HAS_GIC_PRIO_MASKING:
+ return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI);
+ case ARM64_MTE:
+ return IS_ENABLED(CONFIG_ARM64_MTE);
+ case ARM64_BTI:
+ return IS_ENABLED(CONFIG_ARM64_BTI);
+ case ARM64_HAS_TLB_RANGE:
+ return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
+ }
+
+ return true;
+}
+#endif /* __ASSEMBLY__ */
+
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 96e50227f940e..7d5317bc2429f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -450,6 +450,8 @@ static __always_inline bool system_capabilities_finalized(void)
*/
static __always_inline bool cpus_have_cap(unsigned int num)
{
+ if (__builtin_constant_p(num) && !cpucap_is_possible(num))
+ return false;
if (num >= ARM64_NCAPS)
return false;
return arch_test_bit(num, system_cpucaps);
@@ -465,8 +467,6 @@ static __always_inline bool cpus_have_cap(unsigned int num)
*/
static __always_inline bool __cpus_have_const_cap(int num)
{
- if (num >= ARM64_NCAPS)
- return false;
return alternative_has_cap_unlikely(num);
}
@@ -740,8 +740,7 @@ static __always_inline bool system_supports_fpsimd(void)
static inline bool system_uses_hw_pan(void)
{
- return IS_ENABLED(CONFIG_ARM64_PAN) &&
- cpus_have_const_cap(ARM64_HAS_PAN);
+ return cpus_have_const_cap(ARM64_HAS_PAN);
}
static inline bool system_uses_ttbr0_pan(void)
@@ -752,26 +751,22 @@ static inline bool system_uses_ttbr0_pan(void)
static __always_inline bool system_supports_sve(void)
{
- return IS_ENABLED(CONFIG_ARM64_SVE) &&
- cpus_have_const_cap(ARM64_SVE);
+ return cpus_have_const_cap(ARM64_SVE);
}
static __always_inline bool system_supports_sme(void)
{
- return IS_ENABLED(CONFIG_ARM64_SME) &&
- cpus_have_const_cap(ARM64_SME);
+ return cpus_have_const_cap(ARM64_SME);
}
static __always_inline bool system_supports_sme2(void)
{
- return IS_ENABLED(CONFIG_ARM64_SME) &&
- cpus_have_const_cap(ARM64_SME2);
+ return cpus_have_const_cap(ARM64_SME2);
}
static __always_inline bool system_supports_fa64(void)
{
- return IS_ENABLED(CONFIG_ARM64_SME) &&
- cpus_have_const_cap(ARM64_SME_FA64);
+ return cpus_have_const_cap(ARM64_SME_FA64);
}
static __always_inline bool system_supports_tpidr2(void)
@@ -781,20 +776,17 @@ static __always_inline bool system_supports_tpidr2(void)
static __always_inline bool system_supports_cnp(void)
{
- return IS_ENABLED(CONFIG_ARM64_CNP) &&
- cpus_have_const_cap(ARM64_HAS_CNP);
+ return cpus_have_const_cap(ARM64_HAS_CNP);
}
static inline bool system_supports_address_auth(void)
{
- return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
- cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
+ return cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}
static inline bool system_supports_generic_auth(void)
{
- return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
- cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
+ return cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
static inline bool system_has_full_ptr_auth(void)
@@ -804,14 +796,12 @@ static inline bool system_has_full_ptr_auth(void)
static __always_inline bool system_uses_irq_prio_masking(void)
{
- return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
- cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
+ return cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
}
static inline bool system_supports_mte(void)
{
- return IS_ENABLED(CONFIG_ARM64_MTE) &&
- cpus_have_const_cap(ARM64_MTE);
+ return cpus_have_const_cap(ARM64_MTE);
}
static inline bool system_has_prio_mask_debugging(void)
@@ -822,13 +812,12 @@ static inline bool system_has_prio_mask_debugging(void)
static inline bool system_supports_bti(void)
{
- return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
+ return cpus_have_const_cap(ARM64_BTI);
}
static inline bool system_supports_tlb_range(void)
{
- return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
- cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
+ return cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
--
2.30.2
Thread overview: 56+ messages
2023-09-19 9:28 [PATCH 00/37] arm64: Remove cpus_have_const_cap() Mark Rutland
2023-09-19 9:28 ` [PATCH 01/37] clocksource/drivers/arm_arch_timer: Initialize evtstrm after finalizing cpucaps Mark Rutland
2023-09-21 7:41 ` Marc Zyngier
2023-09-21 16:27 ` Mark Rutland
2023-09-19 9:28 ` [PATCH 02/37] arm64/arm: xen: enlighten: Fix KPTI checks Mark Rutland
2023-09-19 9:28 ` [PATCH 03/37] arm64: Factor out cpucap definitions Mark Rutland
2023-09-19 9:28 ` Mark Rutland [this message]
2023-09-19 9:28 ` [PATCH 05/37] arm64: Add cpus_have_final_boot_cap() Mark Rutland
2023-09-21 9:13 ` Suzuki K Poulose
2023-09-21 16:36 ` Mark Rutland
2023-09-22 10:26 ` Suzuki K Poulose
2023-10-02 10:25 ` Mark Rutland
2023-10-05 9:23 ` Mark Rutland
2023-10-05 9:39 ` Suzuki K Poulose
2023-09-19 9:28 ` [PATCH 06/37] arm64: Rework setup_cpu_features() Mark Rutland
2023-09-25 13:04 ` Suzuki K Poulose
2023-09-19 9:28 ` [PATCH 07/37] arm64: Fixup user features at boot time Mark Rutland
2023-09-19 9:28 ` [PATCH 08/37] arm64: Split kpti_install_ng_mappings() Mark Rutland
2023-09-19 9:28 ` [PATCH 09/37] arm64: kvm: Use cpus_have_final_cap() explicitly Mark Rutland
2023-09-21 7:49 ` Marc Zyngier
2023-09-19 9:28 ` [PATCH 10/37] arm64: Explicitly save/restore CPACR when probing SVE and SME Mark Rutland
2023-09-19 9:28 ` [PATCH 11/37] arm64: Rename SVE/SME cpu_enable functions Mark Rutland
2023-09-19 10:52 ` Mark Brown
2023-09-21 16:50 ` Mark Rutland
2023-09-19 9:28 ` [PATCH 12/37] arm64: Use a positive cpucap for FP/SIMD Mark Rutland
2023-09-19 11:21 ` Mark Brown
2023-09-19 9:28 ` [PATCH 13/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_{ADDRESS,GENERIC}_AUTH Mark Rutland
2023-09-19 9:28 ` [PATCH 14/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_ARMv8_4_TTL Mark Rutland
2023-09-19 9:28 ` [PATCH 15/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_BTI Mark Rutland
2023-09-19 11:23 ` Mark Brown
2023-09-19 9:28 ` [PATCH 16/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_CACHE_DIC Mark Rutland
2023-09-19 9:28 ` [PATCH 17/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_CNP Mark Rutland
2023-09-19 9:28 ` [PATCH 18/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_DIT Mark Rutland
2023-09-19 9:28 ` [PATCH 19/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_GIC_PRIO_MASKING Mark Rutland
2023-09-19 9:28 ` [PATCH 20/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_PAN Mark Rutland
2023-09-19 9:28 ` [PATCH 21/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_EPAN Mark Rutland
2023-09-19 9:28 ` [PATCH 22/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_RNG Mark Rutland
2023-09-19 11:24 ` Mark Brown
2023-09-19 9:28 ` [PATCH 23/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_WFXT Mark Rutland
2023-09-19 9:28 ` [PATCH 24/37] arm64: Avoid cpus_have_const_cap() for ARM64_HAS_TLB_RANGE Mark Rutland
2023-09-19 9:28 ` [PATCH 25/37] arm64: Avoid cpus_have_const_cap() for ARM64_MTE Mark Rutland
2023-09-19 9:28 ` [PATCH 26/37] arm64: Avoid cpus_have_const_cap() for ARM64_SSBS Mark Rutland
2023-09-19 9:28 ` [PATCH 27/37] arm64: Avoid cpus_have_const_cap() for ARM64_SPECTRE_V2 Mark Rutland
2023-09-19 9:28 ` [PATCH 28/37] arm64: Avoid cpus_have_const_cap() for ARM64_{SVE,SME,SME2,FA64} Mark Rutland
2023-09-19 11:27 ` Mark Brown
2023-09-19 9:28 ` [PATCH 29/37] arm64: Avoid cpus_have_const_cap() for ARM64_UNMAP_KERNEL_AT_EL0 Mark Rutland
2023-09-19 9:28 ` [PATCH 30/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_843419 Mark Rutland
2023-09-19 9:28 ` [PATCH 31/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1542419 Mark Rutland
2023-09-19 9:28 ` [PATCH 32/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_1742098 Mark Rutland
2023-09-19 9:28 ` [PATCH 33/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_2645198 Mark Rutland
2023-09-19 9:28 ` [PATCH 34/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_CAVIUM_23154 Mark Rutland
2023-09-19 9:28 ` [PATCH 35/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_NVIDIA_CARMEL_CNP Mark Rutland
2023-09-19 9:28 ` [PATCH 36/37] arm64: Avoid cpus_have_const_cap() for ARM64_WORKAROUND_REPEAT_TLBI Mark Rutland
2023-09-19 9:28 ` [PATCH 37/37] arm64: Remove cpus_have_const_cap() Mark Rutland
2023-10-03 17:20 ` Kristina Martsenko
2023-10-05 9:35 ` Mark Rutland