From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: guangrong.xiao@linux.intel.com, rkrcmar@redhat.com, bdas@redhat.com
Subject: [PATCH v2 12/13] KVM: x86: add SMM to the MMU role, support SMRAM address space
Date: Wed, 27 May 2015 19:05:13 +0200
Message-ID: <1432746314-50196-13-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1432746314-50196-1-git-send-email-pbonzini@redhat.com>
This is now very simple to do. The only interesting part is a small
trick to find the right memslot in gfn_to_rmap: the address space is
retrieved from the role word of the shadow page that contains the spte.
The same trick is used in the auditing code.
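To spell the trick out: smm is kept in the top byte of the 32-bit role
word, so the address space index falls out of a single shift.  A minimal
sketch, not the kernel code, with bit positions matching the kvm_host.h
hunk below:

static inline unsigned role_word_to_as_id(unsigned word)
{
        /* smm:8 occupies bits 24-31 of the role word. */
        return word >> 24;
}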
The comment on top of union kvm_mmu_page_role has been stale forever,
so remove it. Speaking of stale code, remove pad_for_nice_hex_output
too: it was splitting the "access" bitfield across two bytes and thus
had effectively turned into pad_for_ugly_hex_output.
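For illustration, here is what the pad was doing.  Bitfield layout is
compiler-dependent, but with the low-to-high packing GCC uses on x86 the
six pad bits pushed "access" to bits 14-16, straddling a byte boundary.
A standalone sketch of the old layout:

#include <stdio.h>

union old_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned pad_for_nice_hex_output:6;
                unsigned direct:1;
                unsigned access:3;
        };
};

int main(void)
{
        union old_role r = { .word = 0 };

        r.access = 7;
        /* Prints 0x1c000: the three access bits end up split across
         * two bytes of the hex dump (0x0001c000). */
        printf("%#x\n", r.word);
        return 0;
}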
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
v1->v2: new
arch/x86/include/asm/kvm_host.h | 26 +++++++++++++++-----------
arch/x86/kvm/mmu.c              | 15 ++++++++++++---
arch/x86/kvm/mmu_audit.c        | 10 +++++++---
arch/x86/kvm/x86.c              |  2 ++
4 files changed, 36 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5a5e13af6e03..47006683f2fe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -184,23 +184,12 @@ struct kvm_mmu_memory_cache {
void *objects[KVM_NR_MEM_OBJS];
};
-/*
- * kvm_mmu_page_role, below, is defined as:
- *
- * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
- * bits 4:7 - page table level for this shadow (1-4)
- * bits 8:9 - page table quadrant for 2-level guests
- * bit 16 - direct mapping of virtual to physical mapping at gfn
- * used for real mode and two-dimensional paging
- * bits 17:19 - common access permissions for all ptes in this shadow page
- */
union kvm_mmu_page_role {
unsigned word;
struct {
unsigned level:4;
unsigned cr4_pae:1;
unsigned quadrant:2;
- unsigned pad_for_nice_hex_output:6;
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
@@ -208,6 +197,15 @@ union kvm_mmu_page_role {
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
unsigned smap_andnot_wp:1;
+ unsigned :8;
+
+ /*
+ * This is left at the top of the word so that
+ * kvm_memslots_for_spte_role can extract it with a
+ * simple shift. While there is room, give it a whole
+ * byte so it is also faster to load it from memory.
+ */
+ unsigned smm:8;
};
};
@@ -1120,6 +1118,12 @@ enum {
#define HF_SMM_MASK (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK (1 << 7)
+#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#define KVM_ADDRESS_SPACE_NUM 2
+
+#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
+#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
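(A hypothetical standalone model, not kernel code, of how the two
macros above cooperate: the vCPU's SMM flag selects the address space
used for guest accesses, and role.smm makes shadow-page lookups land in
the same one.)

#define AS_NORMAL 0
#define AS_SMRAM  1

struct memslots { const char *name; };

static struct memslots address_spaces[2] = {
        { "normal" }, { "smram" },
};

/* Mirrors kvm_arch_vcpu_memslots_id(): a vCPU in SMM uses set 1. */
static struct memslots *slots_for_vcpu(int in_smm)
{
        return &address_spaces[in_smm ? AS_SMRAM : AS_NORMAL];
}

/* Mirrors kvm_memslots_for_spte_role(): the smm value recorded in the
 * page role picks the address space the page was created under. */
static struct memslots *slots_for_role(unsigned smm)
{
        return &address_spaces[smm];
}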
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a749490bc1db..8e9b1758b7a7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -806,13 +806,15 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
gfn_t gfn;
int i;
gfn = sp->gfn;
- slot = gfn_to_memslot(kvm, gfn);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count += 1;
@@ -822,13 +824,15 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
gfn_t gfn;
int i;
gfn = sp->gfn;
- slot = gfn_to_memslot(kvm, gfn);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count -= 1;
@@ -1045,9 +1049,11 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
*/
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
- slot = gfn_to_memslot(kvm, gfn);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
return __gfn_to_rmap(gfn, sp->role.level, slot);
}
@@ -3924,6 +3930,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
struct kvm_mmu *context = &vcpu->arch.mmu;
context->base_role.word = 0;
+ context->base_role.smm = is_smm(vcpu);
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -3985,6 +3992,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
= smep && !is_write_protection(vcpu);
context->base_role.smap_andnot_wp
= smap && !is_write_protection(vcpu);
+ context->base_role.smm = is_smm(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -4267,6 +4275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
.nxe = 1,
.smep_andnot_wp = 1,
.smap_andnot_wp = 1,
+ .smm = 1,
};
/*
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 78288c15400c..a4f62e6f2db2 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -131,12 +131,16 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
unsigned long *rmapp;
struct kvm_mmu_page *rev_sp;
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
gfn_t gfn;
rev_sp = page_header(__pa(sptep));
gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
- if (!gfn_to_memslot(kvm, gfn)) {
+ slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
+ if (!slot) {
if (!__ratelimit(&ratelimit_state))
return;
audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
@@ -146,7 +150,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
return;
}
- rmapp = gfn_to_rmap(kvm, gfn, rev_sp);
+ rmapp = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
if (!*rmapp) {
if (!__ratelimit(&ratelimit_state))
return;
@@ -197,7 +201,7 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
if (sp->role.direct || sp->unsync || sp->role.invalid)
return;
- slots = kvm_memslots(kvm);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, sp->gfn);
rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7dd6c61a71ae..2fa345d9bd6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5477,6 +5477,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
vcpu->arch.smi_pending = 0;
}
}
+
+ kvm_mmu_reset_context(vcpu);
}
static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
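One step the diff leaves implicit: role.smm is part of base_role, so
entering or leaving SMM changes the role word, and the
kvm_mmu_reset_context() call added above drops the cached roots instead
of letting the vCPU keep using shadow pages from the other address
space.  A toy sketch of the invariant (simplified field layout, not
kernel code):

#include <stdbool.h>

union role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned smm:1;         /* the real field is 8 bits */
        };
};

/* A cached root is reusable only if every role bit matches, so a root
 * built outside SMM can never be reused once smm flips to 1. */
static bool root_reusable(union role cached, union role now)
{
        return cached.word == now.word;
}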
--
1.8.3.1