* [PATCH] nestedsvm: ns_vmcb -> vvmcb
@ 2011-05-18 9:39 Christoph Egger
2011-05-20 7:57 ` Keir Fraser
0 siblings, 1 reply; 2+ messages in thread
From: Christoph Egger @ 2011-05-18 9:39 UTC (permalink / raw)
To: xen-devel@lists.xensource.com
[-- Attachment #1: Type: text/plain, Size: 417 bytes --]
Rename the virtual vmcb from ns_vmcb to vvmcb to
make it clearer to the reader what is meant.
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
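For reviewers less familiar with the nested SVM code, a small orientation
sketch follows (illustration only, not part of the patch). It shows how the
three VMCB pointers used throughout the hunks below relate to each other.
Only the accessor and field names (vcpu_nestedhvm, nv_vvmcx, nv_n1vmcx,
nv_n2vmcx) are taken from the patch itself; the helper function and the
header paths are assumptions and may not match the tree exactly.

/* Illustration only -- hypothetical helper, not part of this patch.
 * Header paths are assumed and may differ in-tree. */
#include <xen/sched.h>            /* struct vcpu (assumed path) */
#include <asm/hvm/vcpu.h>         /* struct nestedvcpu, vcpu_nestedhvm() (assumed path) */
#include <asm/hvm/svm/vmcb.h>     /* struct vmcb_struct (assumed path) */

static void nsvm_vmcb_roles(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    /* vvmcb (formerly ns_vmcb): the virtual VMCB the l1 guest passed to
     * its VMRUN instruction; it lives in l1 guest memory and is mapped
     * into Xen at nv->nv_vvmcx. */
    struct vmcb_struct *vvmcb = nv->nv_vvmcx;

    /* n1vmcb: the VMCB Xen uses to run the l1 guest itself. */
    struct vmcb_struct *n1vmcb = nv->nv_n1vmcx;

    /* n2vmcb: the VMCB Xen actually executes for the l2 guest; the
     * prepare4vmrun/prepare4vmexit code below fills it by combining
     * n1vmcb state with the vvmcb contents and copies results back. */
    struct vmcb_struct *n2vmcb = nv->nv_n2vmcx;

    (void)vvmcb; (void)n1vmcb; (void)n2vmcb;
}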
[-- Attachment #2: xen_nh_vvmcb.diff --]
[-- Type: text/plain, Size: 24021 bytes --]
diff -r 6fc41d2ebe57 -r 980ec1b72796 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -319,7 +319,7 @@ static int nsvm_vmrun_permissionmap(stru
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
struct nestedsvm *svm = &vcpu_nestedsvm(v);
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
- struct vmcb_struct *ns_vmcb = nv->nv_vvmcx;
+ struct vmcb_struct *vvmcb = nv->nv_vvmcx;
struct vmcb_struct *host_vmcb = arch_svm->vmcb;
unsigned long *ns_msrpm_ptr;
unsigned int i;
@@ -330,7 +330,7 @@ static int nsvm_vmrun_permissionmap(stru
ns_msrpm_ptr = (unsigned long *)svm->ns_cached_msrpm;
ret = hvm_copy_from_guest_phys(svm->ns_cached_msrpm,
- ns_vmcb->_msrpm_base_pa, MSRPM_SIZE);
+ vvmcb->_msrpm_base_pa, MSRPM_SIZE);
if (ret != HVMCOPY_okay) {
gdprintk(XENLOG_ERR, "hvm_copy_from_guest_phys msrpm %u\n", ret);
return 1;
@@ -340,7 +340,7 @@ static int nsvm_vmrun_permissionmap(stru
* if l1 guest intercepts io ports 0x80 and/or 0xED.
*/
svm->ns_oiomap_pa = svm->ns_iomap_pa;
- svm->ns_iomap_pa = ns_vmcb->_iopm_base_pa;
+ svm->ns_iomap_pa = vvmcb->_iopm_base_pa;
ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT);
ASSERT(ns_viomap != NULL);
@@ -383,15 +383,15 @@ static int nsvm_vmcb_prepare4vmrun(struc
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *ns_vmcb, *n1vmcb, *n2vmcb;
+ struct vmcb_struct *vvmcb, *n1vmcb, *n2vmcb;
bool_t vcleanbits_valid;
int rc;
uint64_t cr0;
- ns_vmcb = nv->nv_vvmcx;
+ vvmcb = nv->nv_vvmcx;
n1vmcb = nv->nv_n1vmcx;
n2vmcb = nv->nv_n2vmcx;
- ASSERT(ns_vmcb != NULL);
+ ASSERT(vvmcb != NULL);
ASSERT(n1vmcb != NULL);
ASSERT(n2vmcb != NULL);
@@ -403,15 +403,15 @@ static int nsvm_vmcb_prepare4vmrun(struc
vcleanbits_valid = 0;
#define vcleanbit_set(_name) \
- (vcleanbits_valid && ns_vmcb->cleanbits.fields._name)
+ (vcleanbits_valid && vvmcb->cleanbits.fields._name)
/* Enable l2 guest intercepts */
if (!vcleanbit_set(intercepts)) {
- svm->ns_cr_intercepts = ns_vmcb->_cr_intercepts;
- svm->ns_dr_intercepts = ns_vmcb->_dr_intercepts;
- svm->ns_exception_intercepts = ns_vmcb->_exception_intercepts;
- svm->ns_general1_intercepts = ns_vmcb->_general1_intercepts;
- svm->ns_general2_intercepts = ns_vmcb->_general2_intercepts;
+ svm->ns_cr_intercepts = vvmcb->_cr_intercepts;
+ svm->ns_dr_intercepts = vvmcb->_dr_intercepts;
+ svm->ns_exception_intercepts = vvmcb->_exception_intercepts;
+ svm->ns_general1_intercepts = vvmcb->_general1_intercepts;
+ svm->ns_general2_intercepts = vvmcb->_general2_intercepts;
}
/* We could track the cleanbits of the n1vmcb from
@@ -432,25 +432,25 @@ static int nsvm_vmcb_prepare4vmrun(struc
*/
n2vmcb->_cr_intercepts =
- n1vmcb->_cr_intercepts | ns_vmcb->_cr_intercepts;
+ n1vmcb->_cr_intercepts | vvmcb->_cr_intercepts;
n2vmcb->_dr_intercepts =
- n1vmcb->_dr_intercepts | ns_vmcb->_dr_intercepts;
+ n1vmcb->_dr_intercepts | vvmcb->_dr_intercepts;
n2vmcb->_exception_intercepts =
- n1vmcb->_exception_intercepts | ns_vmcb->_exception_intercepts;
+ n1vmcb->_exception_intercepts | vvmcb->_exception_intercepts;
n2vmcb->_general1_intercepts =
- n1vmcb->_general1_intercepts | ns_vmcb->_general1_intercepts;
+ n1vmcb->_general1_intercepts | vvmcb->_general1_intercepts;
n2vmcb->_general2_intercepts =
- n1vmcb->_general2_intercepts | ns_vmcb->_general2_intercepts;
+ n1vmcb->_general2_intercepts | vvmcb->_general2_intercepts;
/* Nested Pause Filter */
- if (ns_vmcb->_general1_intercepts & GENERAL1_INTERCEPT_PAUSE)
+ if (vvmcb->_general1_intercepts & GENERAL1_INTERCEPT_PAUSE)
n2vmcb->_pause_filter_count =
- min(n1vmcb->_pause_filter_count, ns_vmcb->_pause_filter_count);
+ min(n1vmcb->_pause_filter_count, vvmcb->_pause_filter_count);
else
n2vmcb->_pause_filter_count = n1vmcb->_pause_filter_count;
/* TSC offset */
- n2vmcb->_tsc_offset = n1vmcb->_tsc_offset + ns_vmcb->_tsc_offset;
+ n2vmcb->_tsc_offset = n1vmcb->_tsc_offset + vvmcb->_tsc_offset;
/* Nested IO permission bitmaps */
rc = nsvm_vmrun_permissionmap(v, vcleanbit_set(iopm));
@@ -460,35 +460,35 @@ static int nsvm_vmcb_prepare4vmrun(struc
/* ASID - Emulation handled in hvm_asid_handle_vmenter() */
/* TLB control */
- n2vmcb->tlb_control = n1vmcb->tlb_control | ns_vmcb->tlb_control;
+ n2vmcb->tlb_control = n1vmcb->tlb_control | vvmcb->tlb_control;
/* Virtual Interrupts */
if (!vcleanbit_set(tpr)) {
- n2vmcb->_vintr = ns_vmcb->_vintr;
+ n2vmcb->_vintr = vvmcb->_vintr;
n2vmcb->_vintr.fields.intr_masking = 1;
}
/* Shadow Mode */
- n2vmcb->interrupt_shadow = ns_vmcb->interrupt_shadow;
+ n2vmcb->interrupt_shadow = vvmcb->interrupt_shadow;
/* Exit codes */
- n2vmcb->exitcode = ns_vmcb->exitcode;
- n2vmcb->exitinfo1 = ns_vmcb->exitinfo1;
- n2vmcb->exitinfo2 = ns_vmcb->exitinfo2;
- n2vmcb->exitintinfo = ns_vmcb->exitintinfo;
+ n2vmcb->exitcode = vvmcb->exitcode;
+ n2vmcb->exitinfo1 = vvmcb->exitinfo1;
+ n2vmcb->exitinfo2 = vvmcb->exitinfo2;
+ n2vmcb->exitintinfo = vvmcb->exitintinfo;
/* Pending Interrupts */
- n2vmcb->eventinj = ns_vmcb->eventinj;
+ n2vmcb->eventinj = vvmcb->eventinj;
/* LBR virtualization */
if (!vcleanbit_set(lbr)) {
- svm->ns_lbr_control = ns_vmcb->lbr_control;
+ svm->ns_lbr_control = vvmcb->lbr_control;
}
n2vmcb->lbr_control.bytes =
- n1vmcb->lbr_control.bytes | ns_vmcb->lbr_control.bytes;
+ n1vmcb->lbr_control.bytes | vvmcb->lbr_control.bytes;
/* NextRIP */
- n2vmcb->nextrip = ns_vmcb->nextrip;
+ n2vmcb->nextrip = vvmcb->nextrip;
/*
* VMCB Save State Area
@@ -496,40 +496,40 @@ static int nsvm_vmcb_prepare4vmrun(struc
/* Segments */
if (!vcleanbit_set(seg)) {
- n2vmcb->es = ns_vmcb->es;
- n2vmcb->cs = ns_vmcb->cs;
- n2vmcb->ss = ns_vmcb->ss;
- n2vmcb->ds = ns_vmcb->ds;
+ n2vmcb->es = vvmcb->es;
+ n2vmcb->cs = vvmcb->cs;
+ n2vmcb->ss = vvmcb->ss;
+ n2vmcb->ds = vvmcb->ds;
/* CPL */
- n2vmcb->_cpl = ns_vmcb->_cpl;
+ n2vmcb->_cpl = vvmcb->_cpl;
}
if (!vcleanbit_set(dt)) {
- n2vmcb->gdtr = ns_vmcb->gdtr;
- n2vmcb->idtr = ns_vmcb->idtr;
+ n2vmcb->gdtr = vvmcb->gdtr;
+ n2vmcb->idtr = vvmcb->idtr;
}
/* EFER */
- v->arch.hvm_vcpu.guest_efer = ns_vmcb->_efer;
- rc = hvm_set_efer(ns_vmcb->_efer);
+ v->arch.hvm_vcpu.guest_efer = vvmcb->_efer;
+ rc = hvm_set_efer(vvmcb->_efer);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_efer failed, rc: %u\n", rc);
/* CR4 */
- v->arch.hvm_vcpu.guest_cr[4] = ns_vmcb->_cr4;
- rc = hvm_set_cr4(ns_vmcb->_cr4);
+ v->arch.hvm_vcpu.guest_cr[4] = vvmcb->_cr4;
+ rc = hvm_set_cr4(vvmcb->_cr4);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
/* CR0 */
svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
- cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
- v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
+ cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, vvmcb, n1vmcb, n2vmcb);
+ v->arch.hvm_vcpu.guest_cr[0] = vvmcb->_cr0;
rc = hvm_set_cr0(cr0);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
/* CR2 */
- v->arch.hvm_vcpu.guest_cr[2] = ns_vmcb->_cr2;
+ v->arch.hvm_vcpu.guest_cr[2] = vvmcb->_cr2;
hvm_update_guest_cr(v, 2);
/* Nested paging mode */
@@ -537,10 +537,10 @@ static int nsvm_vmcb_prepare4vmrun(struc
/* host nested paging + guest nested paging. */
n2vmcb->_np_enable = 1;
- nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
+ nestedsvm_vmcb_set_nestedp2m(v, vvmcb, n2vmcb);
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(vvmcb->_cr3);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else if (paging_mode_hap(v->domain)) {
@@ -552,7 +552,7 @@ static int nsvm_vmcb_prepare4vmrun(struc
* we assume it intercepts page faults.
*/
/* hvm_set_cr3() below sets v->arch.hvm_vcpu.guest_cr[3] for us. */
- rc = hvm_set_cr3(ns_vmcb->_cr3);
+ rc = hvm_set_cr3(vvmcb->_cr3);
if (rc != X86EMUL_OKAY)
gdprintk(XENLOG_ERR, "hvm_set_cr3 failed, rc: %u\n", rc);
} else {
@@ -567,21 +567,21 @@ static int nsvm_vmcb_prepare4vmrun(struc
/* DRn */
if (!vcleanbit_set(dr)) {
- n2vmcb->_dr7 = ns_vmcb->_dr7;
- n2vmcb->_dr6 = ns_vmcb->_dr6;
+ n2vmcb->_dr7 = vvmcb->_dr7;
+ n2vmcb->_dr6 = vvmcb->_dr6;
}
/* RFLAGS */
- n2vmcb->rflags = ns_vmcb->rflags;
+ n2vmcb->rflags = vvmcb->rflags;
/* RIP */
- n2vmcb->rip = ns_vmcb->rip;
+ n2vmcb->rip = vvmcb->rip;
/* RSP */
- n2vmcb->rsp = ns_vmcb->rsp;
+ n2vmcb->rsp = vvmcb->rsp;
/* RAX */
- n2vmcb->rax = ns_vmcb->rax;
+ n2vmcb->rax = vvmcb->rax;
/* Keep the host values of the fs, gs, ldtr, tr, kerngsbase,
* star, lstar, cstar, sfmask, sysenter_cs, sysenter_esp,
@@ -589,31 +589,31 @@ static int nsvm_vmcb_prepare4vmrun(struc
*/
/* Page tables */
- n2vmcb->pdpe0 = ns_vmcb->pdpe0;
- n2vmcb->pdpe1 = ns_vmcb->pdpe1;
- n2vmcb->pdpe2 = ns_vmcb->pdpe2;
- n2vmcb->pdpe3 = ns_vmcb->pdpe3;
+ n2vmcb->pdpe0 = vvmcb->pdpe0;
+ n2vmcb->pdpe1 = vvmcb->pdpe1;
+ n2vmcb->pdpe2 = vvmcb->pdpe2;
+ n2vmcb->pdpe3 = vvmcb->pdpe3;
/* PAT */
if (!vcleanbit_set(np)) {
- n2vmcb->_g_pat = ns_vmcb->_g_pat;
+ n2vmcb->_g_pat = vvmcb->_g_pat;
}
if (!vcleanbit_set(lbr)) {
/* Debug Control MSR */
- n2vmcb->_debugctlmsr = ns_vmcb->_debugctlmsr;
+ n2vmcb->_debugctlmsr = vvmcb->_debugctlmsr;
/* LBR MSRs */
- n2vmcb->_lastbranchfromip = ns_vmcb->_lastbranchfromip;
- n2vmcb->_lastbranchtoip = ns_vmcb->_lastbranchtoip;
- n2vmcb->_lastintfromip = ns_vmcb->_lastintfromip;
- n2vmcb->_lastinttoip = ns_vmcb->_lastinttoip;
+ n2vmcb->_lastbranchfromip = vvmcb->_lastbranchfromip;
+ n2vmcb->_lastbranchtoip = vvmcb->_lastbranchtoip;
+ n2vmcb->_lastintfromip = vvmcb->_lastintfromip;
+ n2vmcb->_lastinttoip = vvmcb->_lastinttoip;
}
/* Cleanbits */
n2vmcb->cleanbits.bytes = 0;
- rc = svm_vmcb_isvalid(__func__, ns_vmcb, 1);
+ rc = svm_vmcb_isvalid(__func__, vvmcb, 1);
if (rc) {
gdprintk(XENLOG_ERR, "virtual vmcb invalid\n");
return rc;
@@ -626,10 +626,10 @@ static int nsvm_vmcb_prepare4vmrun(struc
}
/* Switch guest registers to l2 guest */
- regs->eax = ns_vmcb->rax;
- regs->eip = ns_vmcb->rip;
- regs->esp = ns_vmcb->rsp;
- regs->eflags = ns_vmcb->rflags;
+ regs->eax = vvmcb->rax;
+ regs->eip = vvmcb->rip;
+ regs->esp = vvmcb->rsp;
+ regs->eflags = vvmcb->rflags;
#undef vcleanbit_set
return 0;
@@ -642,33 +642,33 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct
int ret;
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *ns_vmcb;
+ struct vmcb_struct *vvmcb;
- ns_vmcb = nv->nv_vvmcx;
- ASSERT(ns_vmcb != NULL);
+ vvmcb = nv->nv_vvmcx;
+ ASSERT(vvmcb != NULL);
ASSERT(nv->nv_n2vmcx != NULL);
ASSERT(nv->nv_n2vmcx_pa != VMCX_EADDR);
/* Save values for later use. Needed for Nested-on-Nested and
* Shadow-on-Shadow paging.
*/
- svm->ns_vmcb_guestcr3 = ns_vmcb->_cr3;
- svm->ns_vmcb_hostcr3 = ns_vmcb->_h_cr3;
+ svm->ns_vmcb_guestcr3 = vvmcb->_cr3;
+ svm->ns_vmcb_hostcr3 = vvmcb->_h_cr3;
- nv->nv_flushp2m = ns_vmcb->tlb_control;
- if ( svm->ns_guest_asid != ns_vmcb->_guest_asid )
+ nv->nv_flushp2m = vvmcb->tlb_control;
+ if ( svm->ns_guest_asid != vvmcb->_guest_asid )
{
nv->nv_flushp2m = 1;
hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
- svm->ns_guest_asid = ns_vmcb->_guest_asid;
+ svm->ns_guest_asid = vvmcb->_guest_asid;
}
/* nested paging for the guest */
- svm->ns_hap_enabled = (ns_vmcb->_np_enable) ? 1 : 0;
+ svm->ns_hap_enabled = (vvmcb->_np_enable) ? 1 : 0;
/* Remember the V_INTR_MASK in hostflags */
svm->ns_hostflags.fields.vintrmask =
- (ns_vmcb->_vintr.fields.intr_masking) ? 1 : 0;
+ (vvmcb->_vintr.fields.intr_masking) ? 1 : 0;
/* Save l1 guest state (= host state) */
ret = nsvm_vcpu_hostsave(v, inst_len);
@@ -735,31 +735,31 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v,
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *ns_vmcb;
+ struct vmcb_struct *vvmcb;
ASSERT(svm->ns_gif == 0);
- ns_vmcb = nv->nv_vvmcx;
+ vvmcb = nv->nv_vvmcx;
if (nv->nv_vmexit_pending) {
switch (exitcode) {
case VMEXIT_INTR:
- if ( unlikely(ns_vmcb->eventinj.fields.v)
+ if ( unlikely(vvmcb->eventinj.fields.v)
&& nv->nv_vmentry_pending
- && hvm_event_needs_reinjection(ns_vmcb->eventinj.fields.type,
- ns_vmcb->eventinj.fields.vector) )
+ && hvm_event_needs_reinjection(vvmcb->eventinj.fields.type,
+ vvmcb->eventinj.fields.vector) )
{
- ns_vmcb->exitintinfo.bytes = ns_vmcb->eventinj.bytes;
+ vvmcb->exitintinfo.bytes = vvmcb->eventinj.bytes;
}
break;
case VMEXIT_EXCEPTION_PF:
- ns_vmcb->_cr2 = ns_vmcb->exitinfo2;
+ vvmcb->_cr2 = vvmcb->exitinfo2;
/* fall through */
case VMEXIT_NPF:
/* PF error code */
- ns_vmcb->exitinfo1 = svm->ns_vmexit.exitinfo1;
+ vvmcb->exitinfo1 = svm->ns_vmexit.exitinfo1;
/* fault address */
- ns_vmcb->exitinfo2 = svm->ns_vmexit.exitinfo2;
+ vvmcb->exitinfo2 = svm->ns_vmexit.exitinfo2;
break;
case VMEXIT_EXCEPTION_NP:
case VMEXIT_EXCEPTION_SS:
@@ -767,15 +767,15 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v,
case VMEXIT_EXCEPTION_15:
case VMEXIT_EXCEPTION_MF:
case VMEXIT_EXCEPTION_AC:
- ns_vmcb->exitinfo1 = svm->ns_vmexit.exitinfo1;
+ vvmcb->exitinfo1 = svm->ns_vmexit.exitinfo1;
break;
default:
break;
}
}
- ns_vmcb->exitcode = exitcode;
- ns_vmcb->eventinj.bytes = 0;
+ vvmcb->exitcode = exitcode;
+ vvmcb->eventinj.bytes = 0;
return 0;
}
@@ -879,7 +879,7 @@ nsvm_vmcb_guest_intercepts_exitcode(stru
uint64_t exit_bits;
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *ns_vmcb = nv->nv_vvmcx;
+ struct vmcb_struct *vvmcb = nv->nv_vvmcx;
enum nestedhvm_vmexits vmexits;
switch (exitcode) {
@@ -932,18 +932,18 @@ nsvm_vmcb_guest_intercepts_exitcode(stru
ASSERT(regs != NULL);
nestedsvm_vmcb_map(v, nv->nv_vvmcxaddr);
ASSERT(nv->nv_vvmcx != NULL);
- ns_vmcb = nv->nv_vvmcx;
+ vvmcb = nv->nv_vvmcx;
vmexits = nsvm_vmcb_guest_intercepts_msr(svm->ns_cached_msrpm,
- regs->ecx, ns_vmcb->exitinfo1 != 0);
+ regs->ecx, vvmcb->exitinfo1 != 0);
if (vmexits == NESTEDHVM_VMEXIT_HOST)
return 0;
break;
case VMEXIT_IOIO:
nestedsvm_vmcb_map(v, nv->nv_vvmcxaddr);
ASSERT(nv->nv_vvmcx != NULL);
- ns_vmcb = nv->nv_vvmcx;
- vmexits = nsvm_vmcb_guest_intercepts_ioio(ns_vmcb->_iopm_base_pa,
- ns_vmcb->exitinfo1);
+ vvmcb = nv->nv_vvmcx;
+ vmexits = nsvm_vmcb_guest_intercepts_ioio(vvmcb->_iopm_base_pa,
+ vvmcb->exitinfo1);
if (vmexits == NESTEDHVM_VMEXIT_HOST)
return 0;
break;
@@ -964,7 +964,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v)
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct nestedsvm *svm = &vcpu_nestedsvm(v);
- struct vmcb_struct *ns_vmcb = nv->nv_vvmcx;
+ struct vmcb_struct *vvmcb = nv->nv_vvmcx;
struct vmcb_struct *n2vmcb = nv->nv_n2vmcx;
svm_vmsave(nv->nv_n1vmcx);
@@ -987,24 +987,24 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v)
/* Keep it. It's maintainted by the l1 guest. */
/* ASID */
- /* ns_vmcb->_guest_asid = n2vmcb->_guest_asid; */
+ /* vvmcb->_guest_asid = n2vmcb->_guest_asid; */
/* TLB control */
- ns_vmcb->tlb_control = 0;
+ vvmcb->tlb_control = 0;
/* Virtual Interrupts */
- ns_vmcb->_vintr = n2vmcb->_vintr;
+ vvmcb->_vintr = n2vmcb->_vintr;
if (!(svm->ns_hostflags.fields.vintrmask))
- ns_vmcb->_vintr.fields.intr_masking = 0;
+ vvmcb->_vintr.fields.intr_masking = 0;
/* Shadow mode */
- ns_vmcb->interrupt_shadow = n2vmcb->interrupt_shadow;
+ vvmcb->interrupt_shadow = n2vmcb->interrupt_shadow;
/* Exit codes */
- ns_vmcb->exitcode = n2vmcb->exitcode;
- ns_vmcb->exitinfo1 = n2vmcb->exitinfo1;
- ns_vmcb->exitinfo2 = n2vmcb->exitinfo2;
- ns_vmcb->exitintinfo = n2vmcb->exitintinfo;
+ vvmcb->exitcode = n2vmcb->exitcode;
+ vvmcb->exitinfo1 = n2vmcb->exitinfo1;
+ vvmcb->exitinfo2 = n2vmcb->exitinfo2;
+ vvmcb->exitintinfo = n2vmcb->exitintinfo;
/* Interrupts */
/* If we emulate a VMRUN/#VMEXIT in the same host #VMEXIT cycle we have
@@ -1018,79 +1018,79 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v)
hvm_event_needs_reinjection(n2vmcb->eventinj.fields.type,
n2vmcb->eventinj.fields.vector) )
{
- ns_vmcb->exitintinfo = n2vmcb->eventinj;
+ vvmcb->exitintinfo = n2vmcb->eventinj;
}
- ns_vmcb->eventinj.bytes = 0;
+ vvmcb->eventinj.bytes = 0;
/* Nested paging mode */
if (nestedhvm_paging_mode_hap(v)) {
/* host nested paging + guest nested paging. */
- ns_vmcb->_np_enable = n2vmcb->_np_enable;
- ns_vmcb->_cr3 = n2vmcb->_cr3;
- /* The vmcb->h_cr3 is the shadowed h_cr3. The original
- * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
- * hence we keep the ns_vmcb->h_cr3 value. */
+ vvmcb->_np_enable = n2vmcb->_np_enable;
+ vvmcb->_cr3 = n2vmcb->_cr3;
+ /* The n2vmcb->_h_cr3 is the shadowed _h_cr3. The original
+ * unshadowed guest _h_cr3 is kept in vvmcb->_h_cr3,
+ * hence we keep the vvmcb->_h_cr3 value. */
} else if (paging_mode_hap(v->domain)) {
/* host nested paging + guest shadow paging. */
- ns_vmcb->_np_enable = 0;
+ vvmcb->_np_enable = 0;
/* Throw h_cr3 away. Guest is not allowed to set it or
* it can break out, otherwise (security hole!) */
- ns_vmcb->_h_cr3 = 0x0;
+ vvmcb->_h_cr3 = 0x0;
/* Stop intercepting #PF (already done above
* by restoring cached intercepts). */
- ns_vmcb->_cr3 = n2vmcb->_cr3;
+ vvmcb->_cr3 = n2vmcb->_cr3;
} else {
/* host shadow paging + guest shadow paging. */
- ns_vmcb->_np_enable = 0;
- ns_vmcb->_h_cr3 = 0x0;
+ vvmcb->_np_enable = 0;
+ vvmcb->_h_cr3 = 0x0;
/* The vmcb->_cr3 is the shadowed cr3. The original
- * unshadowed guest cr3 is kept in ns_vmcb->_cr3,
- * hence we keep the ns_vmcb->_cr3 value. */
+ * unshadowed guest cr3 is kept in vvmcb->_cr3,
+ * hence we keep the vvmcb->_cr3 value. */
}
/* LBR virtualization - keep lbr control as is */
/* NextRIP */
- ns_vmcb->nextrip = n2vmcb->nextrip;
+ vvmcb->nextrip = n2vmcb->nextrip;
/*
* VMCB Save State Area
*/
/* Segments */
- ns_vmcb->es = n2vmcb->es;
- ns_vmcb->cs = n2vmcb->cs;
- ns_vmcb->ss = n2vmcb->ss;
- ns_vmcb->ds = n2vmcb->ds;
- ns_vmcb->gdtr = n2vmcb->gdtr;
- ns_vmcb->idtr = n2vmcb->idtr;
+ vvmcb->es = n2vmcb->es;
+ vvmcb->cs = n2vmcb->cs;
+ vvmcb->ss = n2vmcb->ss;
+ vvmcb->ds = n2vmcb->ds;
+ vvmcb->gdtr = n2vmcb->gdtr;
+ vvmcb->idtr = n2vmcb->idtr;
/* CPL */
- ns_vmcb->_cpl = n2vmcb->_cpl;
+ vvmcb->_cpl = n2vmcb->_cpl;
/* EFER */
- ns_vmcb->_efer = n2vmcb->_efer;
+ vvmcb->_efer = n2vmcb->_efer;
/* CRn */
- ns_vmcb->_cr4 = n2vmcb->_cr4;
- ns_vmcb->_cr0 = n2vmcb->_cr0;
+ vvmcb->_cr4 = n2vmcb->_cr4;
+ vvmcb->_cr0 = n2vmcb->_cr0;
/* DRn */
- ns_vmcb->_dr7 = n2vmcb->_dr7;
- ns_vmcb->_dr6 = n2vmcb->_dr6;
+ vvmcb->_dr7 = n2vmcb->_dr7;
+ vvmcb->_dr6 = n2vmcb->_dr6;
/* RFLAGS */
- ns_vmcb->rflags = n2vmcb->rflags;
+ vvmcb->rflags = n2vmcb->rflags;
/* RIP */
- ns_vmcb->rip = n2vmcb->rip;
+ vvmcb->rip = n2vmcb->rip;
/* RSP */
- ns_vmcb->rsp = n2vmcb->rsp;
+ vvmcb->rsp = n2vmcb->rsp;
/* RAX */
- ns_vmcb->rax = n2vmcb->rax;
+ vvmcb->rax = n2vmcb->rax;
/* Keep the l2 guest values of the fs, gs, ldtr, tr, kerngsbase,
* star, lstar, cstar, sfmask, sysenter_cs, sysenter_esp,
@@ -1098,25 +1098,25 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v)
*/
/* CR2 */
- ns_vmcb->_cr2 = n2vmcb->_cr2;
+ vvmcb->_cr2 = n2vmcb->_cr2;
/* Page tables */
- ns_vmcb->pdpe0 = n2vmcb->pdpe0;
- ns_vmcb->pdpe1 = n2vmcb->pdpe1;
- ns_vmcb->pdpe2 = n2vmcb->pdpe2;
- ns_vmcb->pdpe3 = n2vmcb->pdpe3;
+ vvmcb->pdpe0 = n2vmcb->pdpe0;
+ vvmcb->pdpe1 = n2vmcb->pdpe1;
+ vvmcb->pdpe2 = n2vmcb->pdpe2;
+ vvmcb->pdpe3 = n2vmcb->pdpe3;
/* PAT */
- ns_vmcb->_g_pat = n2vmcb->_g_pat;
+ vvmcb->_g_pat = n2vmcb->_g_pat;
/* Debug Control MSR */
- ns_vmcb->_debugctlmsr = n2vmcb->_debugctlmsr;
+ vvmcb->_debugctlmsr = n2vmcb->_debugctlmsr;
/* LBR MSRs */
- ns_vmcb->_lastbranchfromip = n2vmcb->_lastbranchfromip;
- ns_vmcb->_lastbranchtoip = n2vmcb->_lastbranchtoip;
- ns_vmcb->_lastintfromip = n2vmcb->_lastintfromip;
- ns_vmcb->_lastinttoip = n2vmcb->_lastinttoip;
+ vvmcb->_lastbranchfromip = n2vmcb->_lastbranchfromip;
+ vvmcb->_lastbranchtoip = n2vmcb->_lastbranchtoip;
+ vvmcb->_lastintfromip = n2vmcb->_lastintfromip;
+ vvmcb->_lastinttoip = n2vmcb->_lastinttoip;
return 0;
}
diff -r 6fc41d2ebe57 -r 980ec1b72796 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1826,7 +1826,7 @@ asmlinkage void svm_vmexit_handler(struc
if ( vcpu_guestmode ) {
enum nestedhvm_vmexits nsret;
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
- struct vmcb_struct *ns_vmcb = nv->nv_vvmcx;
+ struct vmcb_struct *vvmcb = nv->nv_vvmcx;
uint64_t exitinfo1, exitinfo2;
paging_update_nestedmode(v);
@@ -1835,8 +1835,8 @@ asmlinkage void svm_vmexit_handler(struc
* nestedsvm_check_intercepts() expects to have the correct
* exitinfo1 value there.
*/
- exitinfo1 = ns_vmcb->exitinfo1;
- ns_vmcb->exitinfo1 = vmcb->exitinfo1;
+ exitinfo1 = vvmcb->exitinfo1;
+ vvmcb->exitinfo1 = vmcb->exitinfo1;
nsret = nestedsvm_check_intercepts(v, regs, exit_reason);
switch (nsret) {
case NESTEDHVM_VMEXIT_CONTINUE:
* Re: [PATCH] nestedsvm: ns_vmcb -> vvmcb
2011-05-18 9:39 [PATCH] nestedsvm: ns_vmcb -> vvmcb Christoph Egger
@ 2011-05-20 7:57 ` Keir Fraser
0 siblings, 0 replies; 2+ messages in thread
From: Keir Fraser @ 2011-05-20 7:57 UTC (permalink / raw)
To: Christoph Egger, xen-devel@lists.xensource.com
On 18/05/2011 10:39, "Christoph Egger" <Christoph.Egger@amd.com> wrote:
>
> Rename the virtual vmcb from ns_vmcb to vvmcb to
> make it clearer to the reader what is meant.
I don't think vvmcb is a good idea. It's too close to vmcb. I think the
current name is actually okay; at least it is clearly distinct from anything
else. And you use the ns_ prefix quite consistently everywhere -- just
switching this one name would be weird.
-- Keir
> Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>