public inbox for stable@vger.kernel.org
 help / color / mirror / Atom feed
From: Sasha Levin <sashal@kernel.org>
To: stable@vger.kernel.org
Cc: Mark Brown <broonie@kernel.org>, Sasha Levin <sashal@kernel.org>
Subject: Re: [PATCH 6.12 v2 1/8] KVM: arm64: Calculate cptr_el2 traps on activating traps
Date: Fri, 21 Mar 2025 13:28:43 -0400	[thread overview]
Message-ID: <20250321105451-9d201caa2870db2f@stable.kernel.org> (raw)
In-Reply-To: <20250321-stable-sve-6-12-v2-1-417ca2278d18@kernel.org>

[ Sasha's backport helper bot ]

Hi,

✅ All tests passed successfully. No issues detected.
No action required from the submitter.

The upstream commit SHA1 provided is correct: 2fd5b4b0e7b440602455b79977bfa64dea101e6c

WARNING: Author mismatch between patch and upstream commit:
Backport author: Mark Brown <broonie@kernel.org>
Commit author: Fuad Tabba <tabba@google.com>

Status in newer kernel trees:
6.13.y | Present (different SHA1: 341a0c20c99b)

Note: The patch differs from the upstream commit:
---
1:  2fd5b4b0e7b44 ! 1:  2e3cec78e1491 KVM: arm64: Calculate cptr_el2 traps on activating traps
    @@ Metadata
      ## Commit message ##
         KVM: arm64: Calculate cptr_el2 traps on activating traps
     
    +    [ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]
    +
         Similar to VHE, calculate the value of cptr_el2 from scratch on
         activate traps. This removes the need to store cptr_el2 in every
         vcpu structure. Moreover, some traps, such as whether the guest
    @@ Commit message
         Signed-off-by: Fuad Tabba <tabba@google.com>
         Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
         Signed-off-by: Marc Zyngier <maz@kernel.org>
    +    Signed-off-by: Mark Brown <broonie@kernel.org>
     
      ## arch/arm64/include/asm/kvm_host.h ##
     @@ arch/arm64/include/asm/kvm_host.h: struct kvm_vcpu_arch {
    @@ arch/arm64/kvm/arm.c: static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *
      	 * Handle the "start in power-off" case.
     
      ## arch/arm64/kvm/hyp/nvhe/pkvm.c ##
    -@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_hcr(struct kvm_vcpu *vcpu)
    - 	vcpu->arch.hcr_el2 = val;
    - }
    - 
    --static void pvm_init_traps_cptr(struct kvm_vcpu *vcpu)
    --{
    --	struct kvm *kvm = vcpu->kvm;
    --	u64 val = vcpu->arch.cptr_el2;
    --
    --	if (!has_hvhe()) {
    --		val |= CPTR_NVHE_EL2_RES1;
    --		val &= ~(CPTR_NVHE_EL2_RES0);
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
    + 	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
    + 	u64 hcr_set = HCR_RW;
    + 	u64 hcr_clear = 0;
    +-	u64 cptr_set = 0;
    +-	u64 cptr_clear = 0;
    + 
    + 	/* Protected KVM does not support AArch32 guests. */
    + 	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
    + 	/* Trap AMU */
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
    + 		hcr_clear |= HCR_AMVOFFEN;
    +-		cptr_set |= CPTR_EL2_TAM;
     -	}
     -
    --	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
    --		val |= CPTR_EL2_TAM;
    --
    --	/* SVE can be disabled by userspace even if supported. */
    --	if (!vcpu_has_sve(vcpu)) {
    +-	/* Trap SVE */
    +-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
     -		if (has_hvhe())
    --			val &= ~(CPACR_ELx_ZEN);
    +-			cptr_clear |= CPACR_ELx_ZEN;
     -		else
    --			val |= CPTR_EL2_TZ;
    --	}
    --
    --	/* No SME support in KVM. */
    --	BUG_ON(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP));
    --	if (has_hvhe())
    --		val &= ~(CPACR_ELx_SMEN);
    --	else
    --		val |= CPTR_EL2_TSM;
    --
    --	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP)) {
    +-			cptr_set |= CPTR_EL2_TZ;
    + 	}
    + 
    + 	vcpu->arch.hcr_el2 |= hcr_set;
    + 	vcpu->arch.hcr_el2 &= ~hcr_clear;
    +-	vcpu->arch.cptr_el2 |= cptr_set;
    +-	vcpu->arch.cptr_el2 &= ~cptr_clear;
    + }
    + 
    + /*
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
    + 	const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
    + 	u64 mdcr_set = 0;
    + 	u64 mdcr_clear = 0;
    +-	u64 cptr_set = 0;
    + 
    + 	/* Trap/constrain PMU */
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
    + 		mdcr_set |= MDCR_EL2_TTRF;
    + 
    +-	/* Trap Trace */
    +-	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
     -		if (has_hvhe())
    --			val |= CPACR_EL1_TTA;
    +-			cptr_set |= CPACR_EL1_TTA;
     -		else
    --			val |= CPTR_EL2_TTA;
    +-			cptr_set |= CPTR_EL2_TTA;
     -	}
     -
    --	vcpu->arch.cptr_el2 = val;
    --}
    --
    - static void pvm_init_traps_mdcr(struct kvm_vcpu *vcpu)
    - {
    - 	struct kvm *kvm = vcpu->kvm;
    -@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
    - 	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
    - 	int ret;
    + 	/* Trap External Trace */
    + 	if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
    + 		mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
      
    --	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
    - 	vcpu->arch.mdcr_el2 = 0;
    - 
    - 	pkvm_vcpu_reset_hcr(vcpu);
    -@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
    - 		return ret;
    + 	vcpu->arch.mdcr_el2 |= mdcr_set;
    + 	vcpu->arch.mdcr_el2 &= ~mdcr_clear;
    +-	vcpu->arch.cptr_el2 |= cptr_set;
    + }
      
    - 	pvm_init_traps_hcr(vcpu);
    --	pvm_init_traps_cptr(vcpu);
    - 	pvm_init_traps_mdcr(vcpu);
    + /*
    +@@ arch/arm64/kvm/hyp/nvhe/pkvm.c: static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
    + 	/* Clear res0 and set res1 bits to trap potential new features. */
    + 	vcpu->arch.hcr_el2 &= ~(HCR_RES0);
    + 	vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
    +-	if (!has_hvhe()) {
    +-		vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
    +-		vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
    +-	}
    + }
      
    - 	return 0;
    + /*
     @@ arch/arm64/kvm/hyp/nvhe/pkvm.c: int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
      		return ret;
      	}
---

Results of testing on various branches:

| Branch                    | Patch Apply | Build Test |
|---------------------------|-------------|------------|
| stable/linux-6.12.y       |  Success    |  Success   |

  reply	other threads:[~2025-03-21 17:28 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-03-21  0:12 [PATCH 6.12 v2 0/8] KVM: arm64: Backport of SVE fixes to v6.12 Mark Brown
2025-03-21  0:12 ` [PATCH 6.12 v2 1/8] KVM: arm64: Calculate cptr_el2 traps on activating traps Mark Brown
2025-03-21 17:28   ` Sasha Levin [this message]
2025-03-21  0:12 ` [PATCH 6.12 v2 2/8] KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state Mark Brown
2025-03-21 17:27   ` Sasha Levin
2025-03-21  0:12 ` [PATCH 6.12 v2 3/8] KVM: arm64: Remove host FPSIMD saving for non-protected KVM Mark Brown
2025-03-21  0:15   ` kernel test robot
2025-03-21  0:21   ` Mark Brown
2025-03-21 17:26   ` Sasha Levin
2025-03-21  0:13 ` [PATCH 6.12 v2 4/8] KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN Mark Brown
2025-03-21 17:26   ` Sasha Levin
2025-03-21  0:13 ` [PATCH 6.12 v2 5/8] KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN Mark Brown
2025-03-21 17:29   ` Sasha Levin
2025-03-21  0:13 ` [PATCH 6.12 v2 6/8] KVM: arm64: Refactor exit handlers Mark Brown
2025-03-21 17:25   ` Sasha Levin
2025-03-21  0:13 ` [PATCH 6.12 v2 7/8] KVM: arm64: Mark some header functions as inline Mark Brown
2025-03-21 17:27   ` Sasha Levin
2025-03-21  0:13 ` [PATCH 6.12 v2 8/8] KVM: arm64: Eagerly switch ZCR_EL{1,2} Mark Brown
2025-03-21 17:29   ` Sasha Levin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250321105451-9d201caa2870db2f@stable.kernel.org \
    --to=sashal@kernel.org \
    --cc=broonie@kernel.org \
    --cc=stable@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox