public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: d.riley@proxmox.com, jon@nutanix.com
Subject: [PATCH 25/28] KVM: x86/mmu: add support for GMET to NPT page table walks
Date: Thu, 30 Apr 2026 11:07:44 -0400	[thread overview]
Message-ID: <20260430150747.76749-26-pbonzini@redhat.com> (raw)
In-Reply-To: <20260430150747.76749-1-pbonzini@redhat.com>

GMET allows page table entries to be created with U=0 in NPT.
However, when GMET=1, U=0 only affects execution, not reads or
writes.  Ignore user faults on non-fetch accesses for NPT GMET.

Tested-by: David Riley <d.riley@proxmox.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/mmu.h              |  2 +-
 arch/x86/kvm/mmu/mmu.c          | 18 ++++++++++++------
 arch/x86/kvm/svm/nested.c       | 10 +++++++---
 4 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7dde4ca87752..1da3d5c59e15 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -370,6 +370,8 @@ union kvm_mmu_page_role {
 		 * cr4_smep is also set for EPT MBEC.  Because it affects
 		 * which pages are considered non-present (bit 10 additionally
 		 * must be zero if MBEC is on) it has to be in the base role.
+		 * It also has to be in the base role for AMD GMET because
+		 * kernel-executable pages need to have U=0 with GMET enabled.
 		 */
 		unsigned cr4_smep:1;
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1b354e1f2d81..ddf4e467c071 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -97,7 +97,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits);
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr4,
-			     u64 efer, gpa_t nested_cr3);
+			     u64 efer, gpa_t nested_cr3, u64 misc_ctl);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     int huge_page_level, bool accessed_dirty,
 			     bool mbec, gpa_t new_eptp);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 32845edd14fa..015085ef6e46 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -55,6 +55,7 @@
 #include <asm/io.h>
 #include <asm/set_memory.h>
 #include <asm/spec-ctrl.h>
+#include <asm/svm.h>
 #include <asm/vmx.h>
 
 #include "trace.h"
@@ -5572,7 +5573,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
 	 (14 & (access) ? 1 << 14 : 0) | \
 	 (15 & (access) ? 1 << 15 : 0))
 
-static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_mmu *mmu, bool tdp, bool ept)
 {
 	unsigned byte;
 
@@ -5633,7 +5634,12 @@ static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
 			/* Faults from kernel mode accesses to user pages */
 			u16 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
 
-			uf = (pfec & PFERR_USER_MASK) ? (u16)~u : 0;
+			/*
+			 * For NPT GMET, U=0 does not affect reads and writes.  Fetches
+			 * are handled below via cr4_smep.
+			 */
+			if (!(tdp && cr4_smep))
+				uf = (pfec & PFERR_USER_MASK) ? (u16)~u : 0;
 
 			if (efer_nx)
 				ff = (pfec & PFERR_FETCH_MASK) ? (u16)~x : 0;
@@ -5744,7 +5750,7 @@ static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
 		return;
 
 	reset_guest_rsvds_bits_mask(vcpu, mmu);
-	update_permission_bitmask(mmu, false);
+	update_permission_bitmask(mmu, mmu == &vcpu->arch.guest_mmu, false);
 	update_pkru_bitmask(mmu);
 }
 
@@ -5940,7 +5946,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 }
 
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr4,
-			     u64 efer, gpa_t nested_cr3)
+			     u64 efer, gpa_t nested_cr3, u64 misc_ctl)
 {
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	struct kvm_mmu_role_regs regs = {
@@ -5953,7 +5959,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr4,
 
 	/* NPT requires CR0.PG=1. */
 	WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode);
-	cpu_role.base.cr4_smep = false;
+	cpu_role.base.cr4_smep = (misc_ctl & SVM_MISC_ENABLE_GMET) != 0;
 
 	root_role = cpu_role.base;
 	root_role.level = kvm_mmu_get_tdp_level(vcpu);
@@ -6011,7 +6017,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		context->gva_to_gpa = ept_gva_to_gpa;
 		context->sync_spte = ept_sync_spte;
 
-		update_permission_bitmask(context, true);
+		update_permission_bitmask(context, true, true);
 		context->pkru_mask = 0;
 		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
 		reset_ept_shadow_zero_bits_mask(context, execonly);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index a1cffd274000..7adfa7da210d 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -95,7 +95,8 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 	 */
 	kvm_init_shadow_npt_mmu(vcpu, svm->vmcb01.ptr->save.cr4,
 				svm->vmcb01.ptr->save.efer,
-				svm->nested.ctl.nested_cr3);
+				svm->nested.ctl.nested_cr3,
+				svm->nested.ctl.misc_ctl);
 	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
 	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
@@ -2076,12 +2077,15 @@ static gpa_t svm_translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa,
 				      struct x86_exception *exception,
 				      u64 pte_access)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
-	/* NPT walks are always user-walks */
-	access |= PFERR_USER_MASK;
+	/* Non-GMET walks are always user-walks */
+	if (!(svm->nested.ctl.misc_ctl & SVM_MISC_ENABLE_GMET))
+		access |= PFERR_USER_MASK;
+
 	return mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
 }
 
-- 
2.52.0



  parent reply	other threads:[~2026-04-30 15:08 UTC|newest]

Thread overview: 52+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-30 15:07 [PATCH v5 00/28] KVM: combined patchset for MBEC/GMET support Paolo Bonzini
2026-04-30 15:07 ` [PATCH 01/28] KVM: TDX/VMX: rework EPT_VIOLATION_EXEC_FOR_RING3_LIN into PROT_MASK Paolo Bonzini
2026-04-30 15:07 ` [PATCH 02/28] KVM: x86/mmu: remove SPTE_PERM_MASK Paolo Bonzini
2026-04-30 15:07 ` [PATCH 03/28] KVM: x86/mmu: free up bit 10 of PTEs in preparation for MBEC Paolo Bonzini
2026-04-30 18:04   ` Sean Christopherson
2026-04-30 15:07 ` [PATCH 04/28] KVM: x86/mmu: shuffle high bits of SPTEs " Paolo Bonzini
2026-04-30 18:15   ` Sean Christopherson
2026-04-30 15:07 ` [PATCH 05/28] KVM: x86/mmu: remove SPTE_EPT_* Paolo Bonzini
2026-04-30 15:07 ` [PATCH 06/28] KVM: x86/mmu: merge make_spte_{non,}executable Paolo Bonzini
2026-04-30 18:22   ` Sean Christopherson
2026-05-01  3:51     ` Paolo Bonzini
2026-04-30 15:07 ` [PATCH 07/28] KVM: x86/mmu: rename and clarify BYTE_MASK Paolo Bonzini
2026-04-30 18:39   ` Sean Christopherson
2026-04-30 15:07 ` [PATCH 08/28] KVM: x86/mmu: separate more EPT/non-EPT permission_fault() Paolo Bonzini
2026-04-30 18:35   ` Sean Christopherson
2026-04-30 15:07 ` [PATCH 09/28] KVM: x86/mmu: introduce ACC_READ_MASK Paolo Bonzini
2026-04-30 15:07 ` [PATCH 10/28] KVM: x86/mmu: pass PFERR_GUEST_PAGE/FINAL_MASK to kvm_translate_gpa Paolo Bonzini
2026-04-30 18:50   ` Sean Christopherson
2026-05-01  3:52     ` Paolo Bonzini
2026-04-30 15:07 ` [PATCH 11/28] KVM: x86/mmu: pass pte_access for final nGPA->GPA walk Paolo Bonzini
2026-04-30 15:07 ` [PATCH 12/28] KVM: x86: make translate_nested_gpa vendor-specific Paolo Bonzini
2026-04-30 18:53   ` Sean Christopherson
2026-05-01  3:53     ` Paolo Bonzini
2026-04-30 15:07 ` [PATCH 13/28] KVM: x86/mmu: split XS/XU bits for EPT Paolo Bonzini
2026-04-30 15:07 ` [PATCH 14/28] KVM: x86/mmu: move cr4_smep to base role Paolo Bonzini
2026-04-30 15:07 ` [PATCH 15/28] KVM: VMX: enable use of MBEC Paolo Bonzini
2026-04-30 19:00   ` Sean Christopherson
2026-05-05 10:02   ` David Riley
2026-04-30 15:07 ` [PATCH 16/28] KVM: nVMX: pass advanced EPT violation vmexit info to guest Paolo Bonzini
2026-04-30 15:07 ` [PATCH 17/28] KVM: nVMX: pass PFERR_USER_MASK to MMU on EPT violations Paolo Bonzini
2026-04-30 19:03   ` Sean Christopherson
2026-04-30 15:07 ` [PATCH 18/28] KVM: x86/mmu: add support for MBEC to EPT page table walks Paolo Bonzini
2026-04-30 15:07 ` [PATCH 19/28] KVM: nVMX: advertise MBEC to nested guests Paolo Bonzini
2026-04-30 15:07 ` [PATCH 20/28] KVM: nVMX: allow MBEC with EVMCS Paolo Bonzini
2026-04-30 15:07 ` [PATCH 21/28] KVM: x86/mmu: propagate access mask from root pages down Paolo Bonzini
2026-04-30 15:07 ` [PATCH 22/28] KVM: x86/mmu: introduce cpu_role bit for availability of PFEC.I/D Paolo Bonzini
2026-04-30 15:07 ` [PATCH 23/28] KVM: SVM: add GMET bit definitions Paolo Bonzini
2026-04-30 15:07 ` [PATCH 24/28] KVM: x86/mmu: hard code more bits in kvm_init_shadow_npt_mmu Paolo Bonzini
2026-04-30 19:09   ` Sean Christopherson
2026-05-01  3:50     ` Paolo Bonzini
2026-05-01 13:29       ` Sean Christopherson
2026-04-30 15:07 ` Paolo Bonzini [this message]
2026-04-30 15:07 ` [PATCH 26/28] KVM: SVM: enable GMET and set it in MMU role Paolo Bonzini
2026-04-30 19:15   ` Sean Christopherson
2026-05-01  3:59     ` Paolo Bonzini
2026-05-01 14:38       ` Sean Christopherson
2026-04-30 15:07 ` [PATCH 27/28] KVM: SVM: work around errata 1218 Paolo Bonzini
2026-04-30 15:07 ` [PATCH 28/28] KVM: nSVM: enable GMET for guests Paolo Bonzini
2026-04-30 16:16 ` [PATCH v5 00/28] KVM: combined patchset for MBEC/GMET support Paolo Bonzini
2026-04-30 19:17   ` Sean Christopherson
2026-05-01  3:46     ` Paolo Bonzini
  -- strict thread matches above, loose matches on Subject: below --
2026-04-28 11:09 [PATCH v4 " Paolo Bonzini
2026-04-28 11:09 ` [PATCH 25/28] KVM: x86/mmu: add support for GMET to NPT page table walks Paolo Bonzini

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260430150747.76749-26-pbonzini@redhat.com \
    --to=pbonzini@redhat.com \
    --cc=d.riley@proxmox.com \
    --cc=jon@nutanix.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox