From: Dongxiao Xu <dongxiao.xu@intel.com>
To: xen-devel@lists.xen.org
Subject: [PATCH v2] nvmx: fix resource relinquish for nested VMX
Date: Mon, 27 Aug 2012 10:27:12 +0800
Message-ID: <1346034432-29872-1-git-send-email-dongxiao.xu@intel.com>

Changes from v1:
- Add a comment annotating the VMCS pointer switch when destroying
  nested vcpus.
- Remove the unnecessary nvmx_purge_vvmcs() call when destroying
  nested vcpus.

The previous order of resource relinquishing was:
relinquish_domain_resources() -> vcpu_destroy() -> nvmx_vcpu_destroy().
However, some L1 resources such as nv_vvmcx and the io_bitmaps are only
freed in nvmx_vcpu_destroy(). Because those references are still held,
relinquish_domain_resources() cannot reduce the domain's refcnt to 0,
and so the later vcpu release functions are never called.

To fix this issue, we need to release nv_vvmcx and the io_bitmaps in
relinquish_domain_resources().

Besides, after destroying the nested vcpu, we need to switch vmx->vmcs
back to the L1 VMCS and let the vcpu_destroy() logic free the L1 VMCS
page.
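
To make the intended teardown order concrete, here is a small
self-contained mock of the logic (the structure and the mock_* helpers
below are simplified stand-ins for illustration only, not the real Xen
definitions or the code in this patch):

#include <stdio.h>
#include <stdlib.h>

struct vcpu {
    void *nv_vvmcx;      /* mapping of the L1 guest's VMCS page */
    void *nv_n1vmcx;     /* host-allocated VMCS used to run L1 */
    void *nv_n2vmcx;     /* host-allocated VMCS used to run L2 */
    void *current_vmcs;  /* stands in for v->arch.hvm_vmx.vmcs */
};

/* Step 1: runs from the domain relinquish path, so the references into
 * guest memory are dropped before the domain refcnt has to reach 0. */
static void mock_purge_vvmcs(struct vcpu *v)
{
    v->nv_vvmcx = NULL;              /* unmap the L1 VMCS page */
}

/* Step 2: runs later from vcpu_destroy(): switch back to the L1 VMCS so
 * generic teardown frees it, and free the L2 VMCS exactly once here. */
static void mock_nvmx_vcpu_destroy(struct vcpu *v)
{
    if ( v->nv_n1vmcx )
        v->current_vmcs = v->nv_n1vmcx;
    free(v->nv_n2vmcx);
    v->nv_n2vmcx = NULL;
}

int main(void)
{
    struct vcpu v = { 0 };

    v.nv_n1vmcx = malloc(64);
    v.nv_n2vmcx = malloc(64);
    v.nv_vvmcx = &v;                 /* pretend the L1 VMCS is mapped */
    v.current_vmcs = v.nv_n2vmcx;    /* vcpu was last running L2 */

    mock_purge_vvmcs(&v);            /* relinquish_domain_resources() */
    mock_nvmx_vcpu_destroy(&v);      /* per-vcpu teardown */
    free(v.current_vmcs);            /* generic teardown frees L1 VMCS */

    printf("L1 VMCS freed once, L2 VMCS freed once, no leak\n");
    return 0;
}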
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
xen/arch/x86/hvm/hvm.c | 3 +++
xen/arch/x86/hvm/vmx/vmx.c | 3 ++-
xen/arch/x86/hvm/vmx/vvmx.c | 18 +++++++++++++++++-
xen/include/asm-x86/hvm/hvm.h | 1 +
xen/include/asm-x86/hvm/vmx/vvmx.h | 1 +
5 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7f8a025..0576a24 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -561,6 +561,9 @@ int hvm_domain_initialise(struct domain *d)
 void hvm_domain_relinquish_resources(struct domain *d)
 {
+    if ( hvm_funcs.nhvm_domain_relinquish_resources )
+        hvm_funcs.nhvm_domain_relinquish_resources(d);
+
     hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
     hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ffb86c1..3ea7012 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1547,7 +1547,8 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_vcpu_asid = nvmx_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
     .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
-    .nhvm_intr_blocked = nvmx_intr_blocked
+    .nhvm_intr_blocked = nvmx_intr_blocked,
+    .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources
 };

 struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 2e0b79d..5f6553d 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -57,7 +57,15 @@ void nvmx_vcpu_destroy(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);

-    nvmx_purge_vvmcs(v);
+    /*
+     * When destroying the vcpu, it may be running on behalf of L2 guest.
+     * Therefore we need to switch the VMCS pointer back to the L1 VMCS,
+     * in order to avoid double free of L2 VMCS and the possible memory
+     * leak of L1 VMCS page.
+     */
+    if ( nvcpu->nv_n1vmcx )
+        v->arch.hvm_vmx.vmcs = nvcpu->nv_n1vmcx;
+
     if ( nvcpu->nv_n2vmcx ) {
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
         free_xenheap_page(nvcpu->nv_n2vmcx);
@@ -65,6 +73,14 @@ void nvmx_vcpu_destroy(struct vcpu *v)
     }
 }

+void nvmx_domain_relinquish_resources(struct domain *d)
+{
+    struct vcpu *v;
+
+    for_each_vcpu ( d, v )
+        nvmx_purge_vvmcs(v);
+}
+
 int nvmx_vcpu_reset(struct vcpu *v)
 {
     return 0;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7243c4e..3592a8c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -179,6 +179,7 @@ struct hvm_function_table {
     bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);
     enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
+    void (*nhvm_domain_relinquish_resources)(struct domain *d);
 };

 extern struct hvm_function_table hvm_funcs;
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 995f9f4..bbc34e7 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -96,6 +96,7 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v);
 enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
 int nvmx_intercepts_exception(struct vcpu *v,
                               unsigned int trap, int error_code);
+void nvmx_domain_relinquish_resources(struct domain *d);
 int nvmx_handle_vmxon(struct cpu_user_regs *regs);
 int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
--
1.7.1