From: Haozhong Zhang <haozhong.zhang@intel.com>
To: xen-devel@lists.xen.org
Cc: Haozhong Zhang <haozhong.zhang@intel.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Jun Nakajima <jun.nakajima@intel.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 4/4] nestedhvm: replace VMCX_EADDR by INVALID_PADDR
Date: Wed, 14 Dec 2016 18:11:45 +0800	[thread overview]
Message-ID: <20161214101145.11171-5-haozhong.zhang@intel.com> (raw)
In-Reply-To: <20161214101145.11171-1-haozhong.zhang@intel.com>

... because INVALID_PADDR is the more general invalid-address constant.
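
For context, a minimal before/after sketch of the substitution. The
VMCX_EADDR definition quoted here is the one removed from
xen/include/asm-x86/hvm/vcpu.h in the hunk below; the exact definition
of INVALID_PADDR is not part of this patch and is only assumed to be an
equivalent all-ones physical address:

    /* Before: nested-HVM-private sentinel, removed from hvm/vcpu.h below. */
    #define VMCX_EADDR    (~0ULL)
    nv->nv_vvmcxaddr = VMCX_EADDR;      /* mark the virtual VMCB/VMCS as unmapped */

    /* After: reuse the generic invalid physical address constant instead. */
    nv->nv_vvmcxaddr = INVALID_PADDR;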

Suggested-by: Jan Beulich <JBeulich@suse.com>
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
 xen/arch/x86/hvm/nestedhvm.c     |  2 +-
 xen/arch/x86/hvm/svm/nestedsvm.c | 18 +++++++++---------
 xen/arch/x86/hvm/svm/vmcb.c      |  2 +-
 xen/arch/x86/hvm/vmx/vvmx.c      | 16 ++++++++--------
 xen/include/asm-x86/hvm/vcpu.h   |  2 --
 5 files changed, 19 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index c4671d8..c09c5b2 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -54,7 +54,7 @@ nestedhvm_vcpu_reset(struct vcpu *v)
 
     hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
     nv->nv_vvmcx = NULL;
-    nv->nv_vvmcxaddr = VMCX_EADDR;
+    nv->nv_vvmcxaddr = INVALID_PADDR;
     nv->nv_flushp2m = 0;
     nv->nv_p2m = NULL;
 
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8c9b073..4d9de86 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -68,10 +68,10 @@ int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr)
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
 
     if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) {
-        ASSERT(nv->nv_vvmcxaddr != VMCX_EADDR);
+        ASSERT(nv->nv_vvmcxaddr != INVALID_PADDR);
         hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
         nv->nv_vvmcx = NULL;
-        nv->nv_vvmcxaddr = VMCX_EADDR;
+        nv->nv_vvmcxaddr = INVALID_PADDR;
     }
 
     if ( !nv->nv_vvmcx )
@@ -154,7 +154,7 @@ void nsvm_vcpu_destroy(struct vcpu *v)
     if (nv->nv_n2vmcx) {
         free_vmcb(nv->nv_n2vmcx);
         nv->nv_n2vmcx = NULL;
-        nv->nv_n2vmcx_pa = VMCX_EADDR;
+        nv->nv_n2vmcx_pa = INVALID_PADDR;
     }
     if (svm->ns_iomap)
         svm->ns_iomap = NULL;
@@ -164,8 +164,8 @@ int nsvm_vcpu_reset(struct vcpu *v)
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
 
-    svm->ns_msr_hsavepa = VMCX_EADDR;
-    svm->ns_ovvmcb_pa = VMCX_EADDR;
+    svm->ns_msr_hsavepa = INVALID_PADDR;
+    svm->ns_ovvmcb_pa = INVALID_PADDR;
 
     svm->ns_tscratio = DEFAULT_TSC_RATIO;
 
@@ -425,7 +425,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
 
     /* Check if virtual VMCB cleanbits are valid */
     vcleanbits_valid = 1;
-    if (svm->ns_ovvmcb_pa == VMCX_EADDR)
+    if ( svm->ns_ovvmcb_pa == INVALID_PADDR )
         vcleanbits_valid = 0;
     if (svm->ns_ovvmcb_pa != nv->nv_vvmcxaddr)
         vcleanbits_valid = 0;
@@ -674,7 +674,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
     ns_vmcb = nv->nv_vvmcx;
     ASSERT(ns_vmcb != NULL);
     ASSERT(nv->nv_n2vmcx != NULL);
-    ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
+    ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR);
 
     /* Save values for later use. Needed for Nested-on-Nested and
      * Shadow-on-Shadow paging.
@@ -1490,8 +1490,8 @@ void nsvm_vcpu_switch(struct cpu_user_regs *regs)
     ASSERT(v->arch.hvm_svm.vmcb != NULL);
     ASSERT(nv->nv_n1vmcx != NULL);
     ASSERT(nv->nv_n2vmcx != NULL);
-    ASSERT(nv->nv_n1vmcx_pa != VMCX_EADDR);
-    ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
+    ASSERT(nv->nv_n1vmcx_pa != INVALID_PADDR);
+    ASSERT(nv->nv_n2vmcx_pa != INVALID_PADDR);
 
     if (nv->nv_vmexit_pending) {
  vmexit:
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 9ea014f..70d75e7 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -273,7 +273,7 @@ void svm_destroy_vmcb(struct vcpu *v)
     }
 
     nv->nv_n1vmcx = NULL;
-    nv->nv_n1vmcx_pa = VMCX_EADDR;
+    nv->nv_n1vmcx_pa = INVALID_PADDR;
     arch_svm->vmcb = NULL;
 }
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 5523146..c4f19a0 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -114,7 +114,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->guest_vpid = 0;
     nvmx->vmxon_region_pa = INVALID_PADDR;
     nvcpu->nv_vvmcx = NULL;
-    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    nvcpu->nv_vvmcxaddr = INVALID_PADDR;
     nvmx->intr.intr_info = 0;
     nvmx->intr.error_code = 0;
     nvmx->iobitmap[0] = NULL;
@@ -766,10 +766,10 @@ static void nvmx_purge_vvmcs(struct vcpu *v)
     int i;
 
     __clear_current_vvmcs(v);
-    if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+    if ( nvcpu->nv_vvmcxaddr != INVALID_PADDR )
         hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
     nvcpu->nv_vvmcx = NULL;
-    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    nvcpu->nv_vvmcxaddr = INVALID_PADDR;
     v->arch.hvm_vmx.vmcs_shadow_maddr = 0;
     for (i=0; i<2; i++) {
         if ( nvmx->iobitmap[i] ) {
@@ -1393,7 +1393,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
     if ( nvmx_vcpu_in_vmx(v) )
     {
         vmreturn(regs,
-                 nvcpu->nv_vvmcxaddr != VMCX_EADDR ?
+                 nvcpu->nv_vvmcxaddr != INVALID_PADDR ?
                  VMFAIL_VALID : VMFAIL_INVALID);
         return X86EMUL_OKAY;
     }
@@ -1509,7 +1509,7 @@ static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
 
     /* check VMCS is valid and IO BITMAP is set */
-    if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
+    if ( (nvcpu->nv_vvmcxaddr != INVALID_PADDR) &&
             ((nvmx->iobitmap[0] && nvmx->iobitmap[1]) ||
             !(__n2_exec_control(v) & CPU_BASED_ACTIVATE_IO_BITMAP) ) )
         nvcpu->nv_vmentry_pending = 1;
@@ -1529,7 +1529,7 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs)
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
     {
         vmreturn (regs, VMFAIL_INVALID);
         return X86EMUL_OKAY;        
@@ -1554,7 +1554,7 @@ int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
+    if ( vcpu_nestedhvm(v).nv_vvmcxaddr == INVALID_PADDR )
     {
         vmreturn (regs, VMFAIL_INVALID);
         return X86EMUL_OKAY;
@@ -1599,7 +1599,7 @@ int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
     if ( nvcpu->nv_vvmcxaddr != gpa )
         nvmx_purge_vvmcs(v);
 
-    if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
+    if ( nvcpu->nv_vvmcxaddr == INVALID_PADDR )
     {
         bool_t writable;
         void *vvmcx = hvm_map_guest_frame_rw(paddr_to_pfn(gpa), 1, &writable);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index d485536..7b411a8 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -97,8 +97,6 @@ static inline bool_t hvm_vcpu_io_need_completion(const struct hvm_vcpu_io *vio)
            !vio->io_req.data_is_ptr;
 }
 
-#define VMCX_EADDR    (~0ULL)
-
 struct nestedvcpu {
     bool_t nv_guestmode; /* vcpu in guestmode? */
     void *nv_vvmcx; /* l1 guest virtual VMCB/VMCS */
-- 
2.10.1


