From: Dongxiao Xu <dongxiao.xu@intel.com>
To: xen-devel@lists.xensource.com
Cc: eddie.dong@intel.com, xiantao.zhang@intel.com, jun.nakajima@intel.com
Subject: [PATCH 3/4] nested vmx: optimize for bulk access of virtual VMCS
Date: Thu, 17 Jan 2013 13:37:31 +0800
Message-ID: <1358401052-14036-4-git-send-email-dongxiao.xu@intel.com>
In-Reply-To: <1358401052-14036-1-git-send-email-dongxiao.xu@intel.com>

Since we now use VMREAD/VMWRITE to build up the virtual VMCS, each
access to the virtual VMCS needs two VMPTRLD and one VMCLEAR to
switch the VMCS environment, which can be a noticeable performance
overhead. This commit handles multiple virtual VMCS accesses together,
switching the environment only once per batch, to improve performance.
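
The idea, as a rough standalone sketch (the stubbed accessors, the toy
fake_vmcs array and the helper name are all made up for illustration;
the actual changes are in the diff below):

    #include <stdint.h>
    #include <stdlib.h>

    /* Toy stand-ins for VMREAD/VMWRITE and for the VMPTRLD/VMCLEAR pair
     * hidden behind virtual_vmcs_enter()/virtual_vmcs_exit(). */
    static uint64_t fake_vmcs[0x7000];
    static uint64_t vmcs_read(uint16_t field)            { return fake_vmcs[field]; }
    static void vmcs_write(uint16_t field, uint64_t val) { fake_vmcs[field] = val; }
    static void virtual_vmcs_enter(void *vvmcs) { (void)vvmcs; /* VMPTRLD vvmcs */ }
    static void virtual_vmcs_exit(void *vvmcs)  { (void)vvmcs; /* VMCLEAR, VMPTRLD back */ }

    /* Copy n fields from the virtual VMCS into the current (shadow) VMCS
     * with a single enter/exit pair instead of one switch per field. */
    static void vvmcs_to_shadow_batched(void *vvmcs, int n, const uint16_t *field)
    {
        uint64_t *value = calloc(n, sizeof(*value));
        int i;

        if ( !value )
            return;                          /* real code falls back to per-field copies */

        virtual_vmcs_enter(vvmcs);           /* one environment switch for the whole batch */
        for ( i = 0; i < n; i++ )
            value[i] = vmcs_read(field[i]);  /* gather from the virtual VMCS */
        virtual_vmcs_exit(vvmcs);

        for ( i = 0; i < n; i++ )
            vmcs_write(field[i], value[i]);  /* scatter into the shadow VMCS */

        free(value);
    }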

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
 xen/arch/x86/hvm/vmx/vvmx.c |   89 +++++++++++++++++++++++++++++++++++--------
 1 files changed, 73 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 2f0076a..9aba89e 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -829,6 +829,34 @@ static void vvmcs_to_shadow(void *vvmcs, unsigned int field)
     __vmwrite(field, value);
 }
 
+static void vvmcs_to_shadow_bulk(void *vvmcs, int n, u16 *field)
+{
+    u64 *value = NULL;
+    int i = 0;
+
+    if ( cpu_has_vmx_vmcs_shadowing )
+    {
+        value = xzalloc_array(u64, n);
+        if ( !value )
+            goto fallback;
+
+        virtual_vmcs_enter(vvmcs);
+        for ( i = 0; i < n; i++ )
+            value[i] = __vmread(field[i]);
+        virtual_vmcs_exit(vvmcs);
+
+        for ( i = 0; i < n; i++ )
+            __vmwrite(field[i], value[i]);
+
+        xfree(value);
+        return;
+    }
+
+fallback:
+    for ( i = 0; i < n; i++ )
+        vvmcs_to_shadow(vvmcs, field[i]);
+}
+
 static void shadow_to_vvmcs(void *vvmcs, unsigned int field)
 {
     u64 value;
@@ -839,6 +867,34 @@ static void shadow_to_vvmcs(void *vvmcs, unsigned int field)
         __set_vvmcs(vvmcs, field, value);
 }
 
+static void shadow_to_vvmcs_bulk(void *vvmcs, int n, u16 *field)
+{
+    u64 *value = NULL;
+    int i = 0;
+
+    if ( cpu_has_vmx_vmcs_shadowing )
+    {
+        value = xzalloc_array(u64, n);
+        if ( !value )
+            goto fallback;
+
+        for ( i = 0; i < n; i++ )
+            value[i] = __vmread(field[i]);
+
+        virtual_vmcs_enter(vvmcs);
+        for ( i = 0; i < n; i++ )
+            __vmwrite(field[i], value[i]);
+        virtual_vmcs_exit(vvmcs);
+
+        xfree(value);
+        return;
+    }
+
+fallback:
+    for ( i = 0; i < n; i++ )
+        shadow_to_vvmcs(vvmcs, field[i]);
+}
+
 static void load_shadow_control(struct vcpu *v)
 {
     /*
@@ -862,13 +918,17 @@ static void load_shadow_guest_state(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
-    int i;
     u32 control;
     u64 cr_gh_mask, cr_read_shadow;
 
+    u16 vmentry_fields[] = {
+        VM_ENTRY_INTR_INFO,
+        VM_ENTRY_EXCEPTION_ERROR_CODE,
+        VM_ENTRY_INSTRUCTION_LEN,
+    };
+
     /* vvmcs.gstate to shadow vmcs.gstate */
-    for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
-        vvmcs_to_shadow(vvmcs, vmcs_gstate_field[i]);
+    vvmcs_to_shadow_bulk(vvmcs, ARRAY_SIZE(vmcs_gstate_field), (u16 *)vmcs_gstate_field);
 
     hvm_set_cr0(__get_vvmcs(vvmcs, GUEST_CR0));
     hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
@@ -882,9 +942,7 @@ static void load_shadow_guest_state(struct vcpu *v)
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 
-    vvmcs_to_shadow(vvmcs, VM_ENTRY_INTR_INFO);
-    vvmcs_to_shadow(vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
-    vvmcs_to_shadow(vvmcs, VM_ENTRY_INSTRUCTION_LEN);
+    vvmcs_to_shadow_bulk(vvmcs, ARRAY_SIZE(vmentry_fields), vmentry_fields);
 
     /*
      * While emulate CR0 and CR4 for nested virtualization, set the CR0/CR4
@@ -904,10 +962,13 @@ static void load_shadow_guest_state(struct vcpu *v)
     if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
          (v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
     {
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR0);
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR1);
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR2);
-        vvmcs_to_shadow(vvmcs, GUEST_PDPTR3);
+        u16 gpdptr_fields[] = {
+            GUEST_PDPTR0,
+            GUEST_PDPTR1,
+            GUEST_PDPTR2,
+            GUEST_PDPTR3,
+        };
+        vvmcs_to_shadow_bulk(vvmcs, ARRAY_SIZE(gpdptr_fields), gpdptr_fields);
     }
 
     /* TODO: CR3 target control */
@@ -998,13 +1059,11 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
 
 static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
 {
-    int i;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     void *vvmcs = nvcpu->nv_vvmcx;
 
     /* copy shadow vmcs.gstate back to vvmcs.gstate */
-    for ( i = 0; i < ARRAY_SIZE(vmcs_gstate_field); i++ )
-        shadow_to_vvmcs(vvmcs, vmcs_gstate_field[i]);
+    shadow_to_vvmcs_bulk(vvmcs, ARRAY_SIZE(vmcs_gstate_field), (u16 *)vmcs_gstate_field);
     /* RIP, RSP are in user regs */
     __set_vvmcs(vvmcs, GUEST_RIP, regs->eip);
     __set_vvmcs(vvmcs, GUEST_RSP, regs->esp);
@@ -1016,13 +1075,11 @@ static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
 
 static void sync_vvmcs_ro(struct vcpu *v)
 {
-    int i;
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
     void *vvmcs = nvcpu->nv_vvmcx;
 
-    for ( i = 0; i < ARRAY_SIZE(vmcs_ro_field); i++ )
-        shadow_to_vvmcs(nvcpu->nv_vvmcx, vmcs_ro_field[i]);
+    shadow_to_vvmcs_bulk(vvmcs, ARRAY_SIZE(vmcs_ro_field), (u16 *)vmcs_ro_field);
 
     /* Adjust exit_reason/exit_qualifciation for violation case */
     if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION )
-- 
1.7.1
