From: Marc Zyngier <maz@kernel.org>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>,
Santosh Shukla <sashukla@nvidia.com>,
Gavin Shan <gshan@redhat.com>,
kvm@vger.kernel.org, Quentin Perret <qperret@google.com>,
kernel-team@android.com,
Suzuki K Poulose <suzuki.poulose@arm.com>,
kvmarm@lists.cs.columbia.edu,
Vladimir Murzin <vladimir.murzin@arm.com>,
James Morse <james.morse@arm.com>,
linux-arm-kernel@lists.infradead.org,
David Brazdil <dbrazdil@google.com>,
Will Deacon <will@kernel.org>, Qais Yousef <qais.yousef@arm.com>,
Julien Thierry <julien.thierry.kdev@gmail.com>
Subject: [PATCH 11/12] arm64: cpufeature: upgrade hyp caps to final
Date: Fri, 30 Oct 2020 16:40:16 +0000 [thread overview]
Message-ID: <20201030164017.244287-12-maz@kernel.org> (raw)
In-Reply-To: <20201030164017.244287-1-maz@kernel.org>
From: Mark Rutland <mark.rutland@arm.com>
We finalize caps before initializing kvm hyp code, and any use of
cpus_have_const_cap() in kvm hyp code generates redundant and
potentially unsound code to read the cpu_hwcaps array.
A number of helper functions used in both hyp context and regular kernel
context use cpus_have_const_cap(), as some regular kernel code runs
before the capabilities are finalized. It's tedious and error-prone to
write separate copies of these for hyp and non-hyp code.
So that we can avoid the redundant code, let's automatically upgrade
cpus_have_const_cap() to cpus_have_final_cap() when used in hyp context.
With this change, there's never a reason to access the cpu_hwcaps array
from hyp code, and we don't need to create an NVHE alias for it.
This should have no effect on non-hyp code.
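
(Editorial aside, not part of the patch: the standalone C sketch below
illustrates the compile-time selection described above. The #ifdef form,
the stand-in data and main() are simplified substitutes for the kernel's
is_hyp_code()/__is_defined() machinery and the real capability accessors;
names here are illustrative only.)

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's capability state. */
static bool caps_finalized = true;
static bool hwcaps[8] = { [3] = true };

static inline bool cpus_have_final_cap(int num)
{
	/* In the kernel this is patched into a constant branch. */
	return hwcaps[num];
}

static inline bool cpus_have_cap(int num)
{
	/* Slow path: read the capability bitmap directly. */
	return hwcaps[num];
}

static inline bool cpus_have_const_cap(int num)
{
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
	/* Hyp objects: capabilities are already final, skip the check. */
	return cpus_have_final_cap(num);
#else
	/* Kernel proper: may run before capabilities are finalized. */
	return caps_finalized ? cpus_have_final_cap(num) : cpus_have_cap(num);
#endif
}

int main(void)
{
	printf("cap 3: %d\n", cpus_have_const_cap(3));
	return 0;
}

Compiled normally this takes the non-hyp path; compiled with
-D__KVM_NVHE_HYPERVISOR__ the same source folds to the final-cap path,
which is the effect the patch achieves for helpers shared between hyp
and kernel context.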
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: David Brazdil <dbrazdil@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201026134931.28246-4-mark.rutland@arm.com
---
arch/arm64/include/asm/cpufeature.h | 26 ++++++++++++++++++++++++--
arch/arm64/include/asm/virt.h | 12 ------------
arch/arm64/kernel/image-vars.h | 1 -
3 files changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9f671aa0419b..79d6a0371c78 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -375,6 +375,23 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
return false;
}
+static __always_inline bool is_vhe_hyp_code(void)
+{
+ /* Only defined for code run in VHE hyp context */
+ return __is_defined(__KVM_VHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_nvhe_hyp_code(void)
+{
+ /* Only defined for code run in NVHE hyp context */
+ return __is_defined(__KVM_NVHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_hyp_code(void)
+{
+ return is_vhe_hyp_code() || is_nvhe_hyp_code();
+}
+
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
@@ -444,8 +461,11 @@ static __always_inline bool cpus_have_final_cap(int num)
}
/*
- * Test for a capability, possibly with a runtime check.
+ * Test for a capability, possibly with a runtime check for non-hyp code.
*
+ * For hyp code, this behaves the same as cpus_have_final_cap().
+ *
+ * For non-hyp code:
* Before capabilities are finalized, this behaves as cpus_have_cap().
* After capabilities are finalized, this is patched to avoid a runtime check.
*
@@ -453,7 +473,9 @@ static __always_inline bool cpus_have_final_cap(int num)
*/
static __always_inline bool cpus_have_const_cap(int num)
{
- if (system_capabilities_finalized())
+ if (is_hyp_code())
+ return cpus_have_final_cap(num);
+ else if (system_capabilities_finalized())
return __cpus_have_const_cap(num);
else
return cpus_have_cap(num);
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 300be14ba77b..6069be50baf9 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -83,18 +83,6 @@ static inline bool is_kernel_in_hyp_mode(void)
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
-static __always_inline bool is_vhe_hyp_code(void)
-{
- /* Only defined for code run in VHE hyp context */
- return __is_defined(__KVM_VHE_HYPERVISOR__);
-}
-
-static __always_inline bool is_nvhe_hyp_code(void)
-{
- /* Only defined for code run in NVHE hyp context */
- return __is_defined(__KVM_NVHE_HYPERVISOR__);
-}
-
static __always_inline bool has_vhe(void)
{
/*
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index fbd4b6b1fde5..ad8432251733 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -87,7 +87,6 @@ KVM_NVHE_ALIAS(__icache_flags);
/* Kernel symbols needed for cpus_have_final/const_caps checks. */
KVM_NVHE_ALIAS(arm64_const_caps_ready);
KVM_NVHE_ALIAS(cpu_hwcap_keys);
-KVM_NVHE_ALIAS(cpu_hwcaps);
/* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
--
2.28.0
Thread overview: 14+ messages
2020-10-30 16:40 [GIT PULL] KVM/arm64 fixes for 5.10, take #1 Marc Zyngier
2020-10-30 16:40 ` [PATCH 01/12] KVM: arm64: Don't corrupt tpidr_el2 on failed HVC call Marc Zyngier
2020-10-30 16:40 ` [PATCH 02/12] KVM: arm64: Remove leftover kern_hyp_va() in nVHE TLB invalidation Marc Zyngier
2020-10-30 16:40 ` [PATCH 03/12] KVM: arm64: Drop useless PAN setting on host EL1 to EL2 transition Marc Zyngier
2020-10-30 16:40 ` [PATCH 04/12] KVM: arm64: Allocate stage-2 pgd pages with GFP_KERNEL_ACCOUNT Marc Zyngier
2020-10-30 16:40 ` [PATCH 05/12] KVM: arm64: Fix AArch32 handling of DBGD{CCINT, SCRext} and DBGVCR Marc Zyngier
2020-10-30 16:40 ` [PATCH 06/12] KVM: arm64: Fix masks in stage2_pte_cacheable() Marc Zyngier
2020-10-30 16:40 ` [PATCH 07/12] KVM: arm64: Use fallback mapping sizes for contiguous huge page sizes Marc Zyngier
2020-10-30 16:40 ` [PATCH 08/12] KVM: arm64: Force PTE mapping on fault resulting in a device mapping Marc Zyngier
2020-10-30 16:40 ` [PATCH 09/12] KVM: arm64: Factor out is_{vhe,nvhe}_hyp_code() Marc Zyngier
2020-10-30 16:40 ` [PATCH 10/12] arm64: cpufeature: reorder cpus_have_{const, final}_cap() Marc Zyngier
2020-10-30 16:40 ` Marc Zyngier [this message]
2020-10-30 16:40 ` [PATCH 12/12] KVM: arm64: Handle Asymmetric AArch32 systems Marc Zyngier
2020-10-31 14:35 ` [GIT PULL] KVM/arm64 fixes for 5.10, take #1 Paolo Bonzini