public inbox for kvm@vger.kernel.org
Subject: [Patch v3] Enable CPU SMEP feature for KVM
From: Yang, Wei Y @ 2011-05-26 13:28 UTC
  To: Avi Kivity; +Cc: kvm@vger.kernel.org


This patchset enables a new CPU feature, SMEP (Supervisor Mode Execution
Protection), in KVM. SMEP prevents the kernel from executing code that
resides in application (user-mode) pages. The updated Intel SDM describes
this CPU feature; the document will be published soon.
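
For background, a minimal sketch of how bare-metal ring-0 code would detect
and enable SMEP: CPUID.(EAX=7,ECX=0):EBX bit 7 reports support, and CR4
bit 20 enables it. The helper names below are illustrative only and not
part of this patch:

#include <stdbool.h>

#define CPUID_7_EBX_SMEP  (1u << 7)   /* SMEP support flag */
#define CR4_SMEP_BIT      (1ul << 20) /* CR4.SMEP enable bit */

/* Must run at CPL 0; CPUID leaf 7 requires subleaf ECX = 0. */
static inline bool cpu_has_smep(void)
{
	unsigned int eax = 7, ebx, ecx = 0, edx;

	asm volatile("cpuid"
		     : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));
	return ebx & CPUID_7_EBX_SMEP;
}

static inline void enable_smep(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	asm volatile("mov %0, %%cr4" : : "r" (cr4 | CR4_SMEP_BIT));
}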

This patchset is based on Fenghua's SMEP patch series, available at:
https://lkml.org/lkml/2011/5/17/523

Changes since v2: enable SMEP for shadow page table (SPT) mode.
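
In SPT mode the walker ANDs together the U/S bit of every guest PTE it
visits; a supervisor-mode instruction fetch through a translation that is
user-accessible at every level must then fault once CR4.SMEP is set. A
simplified sketch of the check the paging_tmpl.h hunks below add (names
and types are illustrative, not the exact KVM internals):

#include <stdbool.h>

#define PT_USER_MASK  (1ull << 2)  /* U/S bit in a guest PTE */
#define CR4_SMEP_BIT  (1ul << 20)  /* CR4.SMEP */

/*
 * pte_smep is the AND of every PTE visited during the walk, so its
 * U/S bit survives only if all levels permit user access.
 */
static bool smep_blocks_fetch(unsigned long guest_cr4, bool fetch_fault,
			      bool user_fault, unsigned long long pte_smep)
{
	return fetch_fault && !user_fault &&
	       (guest_cr4 & CR4_SMEP_BIT) &&
	       (pte_smep & PT_USER_MASK);
}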

Signed-off-by: Yang Wei <wei.y.yang@intel.com>
Signed-off-by: Shan Haitao <haitao.shan@intel.com>

---
 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/paging_tmpl.h      |   15 +++++++++++++--
 arch/x86/kvm/vmx.c              |    9 +++++++++
 arch/x86/kvm/x86.c              |    7 +++++--
 4 files changed, 28 insertions(+), 4 deletions(-)
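
Note: taken together, the vmx.c and x86.c hunks gate guest use of CR4.SMEP
on both guest CPUID and host support. A simplified sketch of that combined
logic (structure and names are illustrative, not the exact KVM code):

#define CR4_SMEP_BIT  (1ul << 20)

struct vcpu_sketch {
	unsigned long cr4_reserved_bits;  /* starts with SMEP reserved */
};

/* Mirrors vmx_cpuid_update(): un-reserve CR4.SMEP only when the guest
 * CPUID advertises SMEP and the host CPU actually has it. */
static void cpuid_update_sketch(struct vcpu_sketch *v,
				bool guest_smep, bool host_smep)
{
	if (guest_smep && host_smep)
		v->cr4_reserved_bits &= ~CR4_SMEP_BIT;
}

/* Mirrors kvm_set_cr4(): setting any still-reserved bit fails, which
 * the caller turns into a #GP for the guest. */
static int set_cr4_sketch(struct vcpu_sketch *v, unsigned long cr4)
{
	if (cr4 & v->cr4_reserved_bits)
		return 1;
	return 0;
}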

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d2ac8e2..154287b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
+	unsigned long cr4_reserved_bits;
 	unsigned long cr8;
 	u32 hflags;
 	u64 efer;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc01..7e0b2f8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -120,7 +120,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				    gva_t addr, u32 access)
 {
-	pt_element_t pte;
+	pt_element_t pte, pte_smep;
 	pt_element_t __user *ptep_user;
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
@@ -150,7 +150,11 @@ walk:
 		}
 		--walker->level;
 	}
+	pte_smep = ~0ULL;
+#else
+	pte_smep = ~0U;
 #endif
+
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
 	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
 
@@ -234,6 +238,8 @@ walk:
 
 		walker->ptes[walker->level - 1] = pte;
 
+		pte_smep &= pte;
+
 		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
 		    ((walker->level == PT_DIRECTORY_LEVEL) &&
 				is_large_pte(pte) &&
@@ -246,6 +252,11 @@ walk:
 			gfn_t gfn;
 			u32 ac;
 
+			if (unlikely(fetch_fault && !user_fault))
+				if ((vcpu->arch.cr4 & X86_CR4_SMEP)
+						&& (pte_smep & PT_USER_MASK))
+					eperm = true;
+
 			gfn = gpte_to_gfn_lvl(pte, lvl);
 			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
 
@@ -305,7 +316,7 @@ error:
 
 	walker->fault.error_code |= write_fault | user_fault;
 
-	if (fetch_fault && mmu->nx)
+	if (fetch_fault && (mmu->nx || (vcpu->arch.cr4 & X86_CR4_SMEP)))
 		walker->fault.error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
 		walker->fault.error_code |= PFERR_RSVD_MASK;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f..7ad24fd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4507,6 +4507,15 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 			}
 		}
 	}
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	if (best && (best->ebx & bit(X86_FEATURE_SMEP))) {
+		if (boot_cpu_has(X86_FEATURE_SMEP))
+			vcpu->arch.cr4_reserved_bits &=
+				~((unsigned long)X86_CR4_SMEP);
+		else
+			best->ebx &= ~(bit(X86_FEATURE_SMEP));
+	}
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 77c9d86..6ead39e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -598,9 +598,10 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
-	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE
+				 | X86_CR4_PAE | X86_CR4_SMEP;
 
-	if (cr4 & CR4_RESERVED_BITS)
+	if (cr4 & vcpu->arch.cr4_reserved_bits)
 		return 1;
 
 	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
@@ -6222,6 +6223,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	kvm_async_pf_hash_reset(vcpu);
 
+	vcpu->arch.cr4_reserved_bits = CR4_RESERVED_BITS;
+
 	return 0;
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
-- 
1.7.4.1

