From: Dongxiao Xu <dongxiao.xu@intel.com>
To: kvm@vger.kernel.org
Cc: mtosatti@redhat.com, gleb@redhat.com
Subject: [PATCH v2 4/4] nested vmx: use a list to store the launched vmcs12 for L1 VMM
Date: Thu, 22 Nov 2012 12:51:59 +0800 [thread overview]
Message-ID: <1353559919-29439-5-git-send-email-dongxiao.xu@intel.com> (raw)
In-Reply-To: <1353559919-29439-1-git-send-email-dongxiao.xu@intel.com>
The launch state is not a member of the VMCS area; store it in a
separate per-vCPU variable (a list) instead.
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
arch/x86/kvm/vmx.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++---
1 files changed, 81 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 20de88b..3be9265 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -177,8 +177,7 @@ struct __packed vmcs12 {
u32 revision_id;
u32 abort;
- u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
- u32 padding[7]; /* room for future expansion */
+ u32 padding[8]; /* room for future expansion */
u64 io_bitmap_a;
u64 io_bitmap_b;
@@ -339,6 +338,11 @@ struct vmcs02_list {
struct loaded_vmcs vmcs02;
};
+struct vmcs12_list {
+ unsigned long vmcs12_pa;
+ struct list_head node;
+};
+
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -364,6 +368,8 @@ struct nested_vmx {
* we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
+ /* vmcs12_pool contains the launched vmcs12. */
+ struct list_head vmcs12_pool;
};
struct vcpu_vmx {
@@ -619,6 +625,58 @@ static void nested_release_page_clean(struct page *page)
kvm_release_page_clean(page);
}
+static int vmcs12_launched(struct list_head *vmcs12_pool,
+ unsigned long vmcs12_pa)
+{
+ struct vmcs12_list *iter;
+ struct list_head *pos;
+ int launched = 0;
+
+ list_for_each(pos, vmcs12_pool) {
+ iter = list_entry(pos, struct vmcs12_list, node);
+ if (vmcs12_pa == iter->vmcs12_pa) {
+ launched = 1;
+ break;
+ }
+ }
+
+ return launched;
+}
+
+static int set_vmcs12_launched(struct list_head *vmcs12_pool,
+ unsigned long vmcs12_pa)
+{
+ struct vmcs12_list *vmcs12;
+
+ if (vmcs12_launched(vmcs12_pool, vmcs12_pa))
+ return 0;
+
+ vmcs12 = kzalloc(sizeof(struct vmcs12_list), GFP_KERNEL);
+ if (!vmcs12)
+ return -ENOMEM;
+
+ vmcs12->vmcs12_pa = vmcs12_pa;
+ list_add(&vmcs12->node, vmcs12_pool);
+
+ return 0;
+}
+
+static void clear_vmcs12_launched(struct list_head *vmcs12_pool,
+ unsigned long vmcs12_pa)
+{
+ struct vmcs12_list *iter;
+ struct list_head *pos;
+
+ list_for_each(pos, vmcs12_pool) {
+ iter = list_entry(pos, struct vmcs12_list, node);
+ if (vmcs12_pa == iter->vmcs12_pa) {
+ list_del(&iter->node);
+ kfree(iter);
+ break;
+ }
+ }
+}
+
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
@@ -5116,6 +5174,18 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
}
/*
+ * Free the vmcs12 list.
+ */
+static void nested_free_vmcs12_list(struct vcpu_vmx *vmx)
+{
+ struct vmcs12_list *item, *n;
+ list_for_each_entry_safe(item, n, &vmx->nested.vmcs12_pool, node) {
+ list_del(&item->node);
+ kfree(item);
+ }
+}
+
+/*
* Emulate the VMXON instruction.
* Currently, we just remember that VMX is active, and do not save or even
* inspect the argument to VMXON (the so-called "VMXON pointer") because we
@@ -5212,6 +5282,7 @@ static void free_nested(struct vcpu_vmx *vmx)
}
nested_free_all_saved_vmcss(vmx);
+ nested_free_vmcs12_list(vmx);
}
/* Emulate the VMXOFF instruction */
@@ -5364,7 +5435,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
return 1;
}
vmcs12 = kmap(page);
- vmcs12->launch_state = 0;
+ clear_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
kunmap(page);
nested_release_page(page);
@@ -6460,6 +6531,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;
+ INIT_LIST_HEAD(&vmx->nested.vmcs12_pool);
return &vmx->vcpu;
@@ -6839,6 +6911,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
struct loaded_vmcs *vmcs02;
+ int is_launched;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
@@ -6857,7 +6930,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* for misconfigurations which will anyway be caught by the processor
* when using the merged vmcs02.
*/
- if (vmcs12->launch_state == launch) {
+ is_launched =
+ vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12));
+ if (is_launched == launch) {
nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
@@ -6946,7 +7021,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vcpu->cpu = cpu;
put_cpu();
- vmcs12->launch_state = 1;
+ if (set_vmcs12_launched(&vmx->nested.vmcs12_pool, __pa(vmcs12)) < 0)
+ return -ENOMEM;
prepare_vmcs02(vcpu);
--
1.7.1
next prev parent reply other threads:[~2012-11-22 18:26 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-11-22 4:51 [PATCH v2 0/4] nested vmx code clean up and restructure Dongxiao Xu
2012-11-22 4:51 ` [PATCH v2 1/4] nested vmx: clean up for vmcs12 read and write Dongxiao Xu
2012-11-22 4:51 ` [PATCH v2 2/4] nested vmx: clean up for nested_cpu_has_xxx functions Dongxiao Xu
2012-11-22 4:51 ` [PATCH v2 3/4] nested vmx: use vmcs12_read/write() to operate VMCS fields Dongxiao Xu
2012-11-22 4:51 ` Dongxiao Xu [this message]
2012-11-28 0:29 ` [PATCH v2 4/4] nested vmx: use a list to store the launched vmcs12 for L1 VMM Marcelo Tosatti
2012-11-28 11:27 ` Gleb Natapov
2012-11-29 2:05 ` Xu, Dongxiao
2012-11-28 12:30 ` Orit Wasserman
2012-11-29 2:09 ` Xu, Dongxiao
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1353559919-29439-5-git-send-email-dongxiao.xu@intel.com \
--to=dongxiao.xu@intel.com \
--cc=gleb@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox