From: Marc Zyngier <maz@kernel.org>
To: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, suzuki.poulose@arm.com,
mark.rutland@arm.com, will@kernel.org, catalin.marinas@arm.com,
james.morse@arm.com, steven.price@arm.com
Subject: Re: [RFC V3 13/13] KVM: arm64: Enable FEAT_LPA2 based 52 bits IPA size on 4K and 16K
Date: Mon, 11 Oct 2021 11:16:04 +0100
Message-ID: <87r1crq32z.wl-maz@kernel.org>
In-Reply-To: <1632998116-11552-14-git-send-email-anshuman.khandual@arm.com>
On Thu, 30 Sep 2021 11:35:16 +0100,
Anshuman Khandual <anshuman.khandual@arm.com> wrote:
>
> Stage-2 FEAT_LPA2 support is independent of, and orthogonal to, FEAT_LPA2
> support at Stage-1 or in the host kernel. Stage-2 IPA range support is
> detected from the platform via ID_AA64MMFR0_TGRAN_2_SUPPORTED_LPA2 and
> gets enabled regardless of Stage-1 translation.
>
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
> arch/arm64/include/asm/kvm_pgtable.h | 10 +++++++++-
> arch/arm64/kvm/hyp/pgtable.c | 25 +++++++++++++++++++++++--
> arch/arm64/kvm/reset.c | 14 ++++++++++----
> 3 files changed, 42 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
> index 0277838..78a9d12 100644
> --- a/arch/arm64/include/asm/kvm_pgtable.h
> +++ b/arch/arm64/include/asm/kvm_pgtable.h
> @@ -29,18 +29,26 @@ typedef u64 kvm_pte_t;
>
> #define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
> #define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
> +#define KVM_PTE_ADDR_51_50 GENMASK(9, 8)
>
> static inline bool kvm_pte_valid(kvm_pte_t pte)
> {
> return pte & KVM_PTE_VALID;
> }
>
> +void set_kvm_lpa2_enabled(void);
> +bool get_kvm_lpa2_enabled(void);
> +
> static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
> {
> u64 pa = pte & KVM_PTE_ADDR_MASK;
>
> - if (PAGE_SHIFT == 16)
> + if (PAGE_SHIFT == 16) {
> pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
> + } else {
> + if (get_kvm_lpa2_enabled())
Having to do a function call just for this test seems bad, especially
for something that is used so often on the fault path.
Why can't this be made a normal capability that indicates LPA support
for the current page size?
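Something like the helper below is what I have in mind -- a rough
sketch only, assuming a new ARM64_HAS_LPA2 capability (which does not
exist yet) and a helper name of my own choosing:

/*
 * Sketch: back the test with a cpucap rather than an out-of-line
 * accessor, so the fault path gets a static-key check instead of a
 * function call. ARM64_HAS_LPA2 is an assumed new capability that
 * would be detected from ID_AA64MMFR0_EL1 at boot.
 */
static inline bool kvm_lpa2_enabled(void)
{
	return IS_ENABLED(CONFIG_ARM64_PA_BITS_52) &&
	       cpus_have_const_cap(ARM64_HAS_LPA2);
}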
> + pa |= FIELD_GET(KVM_PTE_ADDR_51_50, pte) << 50;
Where are bits 48 and 49?
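With LPA2 the descriptor carries OA[49:48] directly in bits [49:48] of
the entry, so the base address mask has to grow as well; only OA[51:50]
move down into bits [9:8]. Roughly (my naming, not the patch's):

/* Sketch only: widen the low mask so bits [49:48] are not dropped. */
#define KVM_PTE_ADDR_MASK_LPA2	GENMASK(49, PAGE_SHIFT)

static inline u64 kvm_pte_to_phys_lpa2(kvm_pte_t pte)
{
	/* Bits [49:PAGE_SHIFT] are in place; only [51:50] are relocated. */
	return (pte & KVM_PTE_ADDR_MASK_LPA2) |
	       (FIELD_GET(KVM_PTE_ADDR_51_50, pte) << 50);
}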
> + }
>
> return pa;
> }
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index f8ceebe..58141bf 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -49,6 +49,18 @@
> #define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
> #define KVM_MAX_OWNER_ID 1
>
> +static bool kvm_lpa2_enabled;
> +
> +bool get_kvm_lpa2_enabled(void)
> +{
> + return kvm_lpa2_enabled;
> +}
> +
> +void set_kvm_lpa2_enabled(void)
> +{
> + kvm_lpa2_enabled = true;
> +}
> +
> struct kvm_pgtable_walk_data {
> struct kvm_pgtable *pgt;
> struct kvm_pgtable_walker *walker;
> @@ -126,8 +138,12 @@ static kvm_pte_t kvm_phys_to_pte(u64 pa)
> {
> kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
>
> - if (PAGE_SHIFT == 16)
> + if (PAGE_SHIFT == 16) {
> pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
> + } else {
> + if (get_kvm_lpa2_enabled())
> + pte |= FIELD_PREP(KVM_PTE_ADDR_51_50, pa >> 50);
> + }
>
> return pte;
> }
> @@ -540,6 +556,9 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
> */
> vtcr |= VTCR_EL2_HA;
>
> + if (get_kvm_lpa2_enabled())
> + vtcr |= VTCR_EL2_DS;
> +
> /* Set the vmid bits */
> vtcr |= (get_vmid_bits(mmfr1) == 16) ?
> VTCR_EL2_VS_16BIT :
> @@ -577,7 +596,9 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
> if (prot & KVM_PGTABLE_PROT_W)
> attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
>
> - attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
> + if (!get_kvm_lpa2_enabled())
> + attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
> +
> attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
> attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
> *ptep = attr;
> diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
> index 5ce36b0..97ec387 100644
> --- a/arch/arm64/kvm/reset.c
> +++ b/arch/arm64/kvm/reset.c
> @@ -315,26 +315,32 @@ u32 get_kvm_ipa_limit(void)
>
> int kvm_set_ipa_limit(void)
> {
> - unsigned int parange;
> + unsigned int parange, tgran;
> u64 mmfr0;
>
> mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
> parange = cpuid_feature_extract_unsigned_field(mmfr0,
> ID_AA64MMFR0_PARANGE_SHIFT);
> + tgran = cpuid_feature_extract_unsigned_field(mmfr0,
> + ID_AA64MMFR0_TGRAN_2_SHIFT);
> /*
> * IPA size beyond 48 bits could not be supported
> * on either 4K or 16K page size. Hence let's cap
> * it to 48 bits, in case it's reported as larger
> * on the system.
Shouldn't you fix this comment?
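Something along these lines would match what the code now does (my
wording, for illustration only):

	/*
	 * With FEAT_LPA2, a 52 bit IPA size is possible on 4K and 16K
	 * pages as well. Only cap PARange to 48 bits when the platform
	 * does not implement LPA2 at Stage-2.
	 */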
> */
> - if (PAGE_SIZE != SZ_64K)
> - parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
> + if (PAGE_SIZE != SZ_64K) {
> + if (tgran == ID_AA64MMFR0_TGRAN_2_SUPPORTED_LPA2)
> + set_kvm_lpa2_enabled();
> + else
> + parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
> + }
>
> /*
> * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
> * Stage-2. If not, things will stop very quickly.
> */
> - switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
> + switch (tgran) {
> case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
> kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
> return -EINVAL;
Another thing I don't see is how you handle TLB invalidation by level
now that we gain a level 0 with 4K pages, which breaks the current
assumptions encoded in __tlbi_level().
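For reference, the current encoding is roughly the following
(paraphrased, not a verbatim copy of asm/tlbflush.h): a level of 0 is
treated as "no level information", and that sentinel stops working once
level 0 block mappings become legal with LPA2 on 4K pages.

/*
 * Simplified paraphrase of the existing TTL hint encoding: TTL[1:0]
 * carries the level and TTL[3:2] the granule, with level == 0 meaning
 * "don't know", so the hint is silently dropped for level 0.
 */
#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) && level) {	\
		u64 ttl = level & 3;					\
									\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)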
M.
--
Without deviation from the norm, progress is not possible.