* [PATCH v2 Altp2m cleanup v3 1/3] altp2m cleanup work
2016-08-19 17:22 [PATCH v2 Altp2m cleanup v3 0/3] Cleaning up altp2m code Paul Lai
@ 2016-08-19 17:22 ` Paul Lai
2016-09-02 13:03 ` Jan Beulich
2016-08-19 17:22 ` [PATCH v2 Altp2m cleanup v3 2/3] Move altp2m specific functions to altp2m files Paul Lai
2016-08-19 17:22 ` [PATCH v2 Altp2m cleanup v3 3/3] Making altp2m struct dynamically allocated Paul Lai
2 siblings, 1 reply; 11+ messages in thread
From: Paul Lai @ 2016-08-19 17:22 UTC (permalink / raw)
To: xen-devel; +Cc: ravi.sahita, george.dunlap, jbeulich
Indent goto labels by one space.
Inline (header) the altp2m functions.
Define default behavior in switch statements.
Define max and min for the range of altp2m macro-defined values.
Signed-off-by: Paul Lai <paul.c.lai@intel.com>
---
xen/arch/x86/hvm/hvm.c | 46 ++++++++++++++++++++-----------------------
xen/include/asm-x86/hvm/hvm.h | 19 +++++++++++++++---
2 files changed, 37 insertions(+), 28 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 893eff6..1995fa4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1932,11 +1932,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
* Otherwise, this is an error condition. */
rc = fall_through;
-out_put_gfn:
+ out_put_gfn:
__put_gfn(p2m, gfn);
if ( ap2m_active )
__put_gfn(hostp2m, gfn);
-out:
+ out:
/* All of these are delayed until we exit, since we might
* sleep on event ring wait queues, and we must not hold
* locks in such circumstance */
@@ -5213,12 +5213,25 @@ static int do_altp2m_op(
return -EFAULT;
if ( a.pad1 || a.pad2 ||
- (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) ||
- (a.cmd < HVMOP_altp2m_get_domain_state) ||
- (a.cmd > HVMOP_altp2m_change_gfn) )
+ (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
return -EINVAL;
- d = (a.cmd != HVMOP_altp2m_vcpu_enable_notify) ?
+ switch( a.cmd )
+ {
+ case HVMOP_altp2m_get_domain_state:
+ case HVMOP_altp2m_set_domain_state:
+ case HVMOP_altp2m_vcpu_enable_notify:
+ case HVMOP_altp2m_create_p2m:
+ case HVMOP_altp2m_destroy_p2m:
+ case HVMOP_altp2m_switch_p2m:
+ case HVMOP_altp2m_set_mem_access:
+ case HVMOP_altp2m_change_gfn:
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ d = ( a.cmd != HVMOP_altp2m_vcpu_enable_notify ) ?
rcu_lock_domain_by_any_id(a.domain) : rcu_lock_current_domain();
if ( d == NULL )
@@ -5335,6 +5348,8 @@ static int do_altp2m_op(
rc = p2m_change_altp2m_gfn(d, a.u.change_gfn.view,
_gfn(a.u.change_gfn.old_gfn),
_gfn(a.u.change_gfn.new_gfn));
+ default:
+ return -EINVAL;
}
out:
@@ -5859,25 +5874,6 @@ void hvm_toggle_singlestep(struct vcpu *v)
v->arch.hvm_vcpu.single_step = !v->arch.hvm_vcpu.single_step;
}
-void altp2m_vcpu_update_p2m(struct vcpu *v)
-{
- if ( hvm_funcs.altp2m_vcpu_update_p2m )
- hvm_funcs.altp2m_vcpu_update_p2m(v);
-}
-
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
-{
- if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
- hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
-}
-
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
-{
- if ( hvm_funcs.altp2m_vcpu_emulate_ve )
- return hvm_funcs.altp2m_vcpu_emulate_ve(v);
- return 0;
-}
-
int hvm_set_mode(struct vcpu *v, int mode)
{
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 314881a..07109b7 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -589,13 +589,26 @@ static inline bool_t hvm_altp2m_supported(void)
}
/* updates the current hardware p2m */
-void altp2m_vcpu_update_p2m(struct vcpu *v);
+static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
+{
+ if ( hvm_funcs.altp2m_vcpu_update_p2m )
+ hvm_funcs.altp2m_vcpu_update_p2m(v);
+}
/* updates VMCS fields related to VMFUNC and #VE */
-void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v);
+static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
+{
+ if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
+ hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+}
/* emulates #VE */
-bool_t altp2m_vcpu_emulate_ve(struct vcpu *v);
+static inline bool_t altp2m_vcpu_emulate_ve(struct vcpu *v)
+{
+ if ( hvm_funcs.altp2m_vcpu_emulate_ve )
+ return hvm_funcs.altp2m_vcpu_emulate_ve(v);
+ return 0;
+}
/* Check CR4/EFER values */
const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 Altp2m cleanup v3 2/3] Move altp2m specific functions to altp2m files.
2016-08-19 17:22 [PATCH v2 Altp2m cleanup v3 0/3] Cleaning up altp2m code Paul Lai
2016-08-19 17:22 ` [PATCH v2 Altp2m cleanup v3 1/3] altp2m cleanup work Paul Lai
@ 2016-08-19 17:22 ` Paul Lai
2016-09-02 13:30 ` Jan Beulich
2016-08-19 17:22 ` [PATCH v2 Altp2m cleanup v3 3/3] Making altp2m struct dynamically allocated Paul Lai
2 siblings, 1 reply; 11+ messages in thread
From: Paul Lai @ 2016-08-19 17:22 UTC (permalink / raw)
To: xen-devel; +Cc: ravi.sahita, george.dunlap, jbeulich
Move altp2m-specific functions to the altp2m files. This makes the code
a little easier to read.
Also moving ept code to ept specific files as requested in:
https://lists.xenproject.org/archives/html/xen-devel/2015-07/msg04323.html
Signed-off-by: Paul Lai <paul.c.lai@intel.com>
---
xen/arch/x86/mm/altp2m.c | 45 +++++++++++++++++++++++++++++++++++++++
xen/arch/x86/mm/hap/hap.c | 35 +++++-------------------------
xen/arch/x86/mm/p2m-ept.c | 39 +++++++++++++++++++++++++++++++++
xen/arch/x86/mm/p2m.c | 45 +++------------------------------------
xen/include/asm-x86/altp2m.h | 4 +++-
xen/include/asm-x86/hvm/vmx/vmx.h | 3 +++
xen/include/asm-x86/p2m.h | 9 +++-----
7 files changed, 101 insertions(+), 79 deletions(-)
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 930bdc2..cc9b0fc 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -17,6 +17,7 @@
#include <asm/hvm/support.h>
#include <asm/hvm/hvm.h>
+#include <asm/domain.h>
#include <asm/p2m.h>
#include <asm/altp2m.h>
@@ -65,6 +66,50 @@ altp2m_vcpu_destroy(struct vcpu *v)
vcpu_unpause(v);
}
+int
+hvm_altp2m_init( struct domain *d)
+{
+ int rc = 0;
+ unsigned int i = 0;
+
+ /* Init alternate p2m data. */
+ if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for ( i = 0; i < MAX_EPTP; i++ )
+ d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+ if ( rc != 0 )
+ goto out;
+ }
+
+ d->arch.altp2m_active = 0;
+ out:
+ return rc;
+}
+
+void
+hvm_altp2m_teardown( struct domain *d)
+{
+ unsigned int i = 0;
+ d->arch.altp2m_active = 0;
+
+ if ( d->arch.altp2m_eptp )
+ {
+ free_xenheap_page(d->arch.altp2m_eptp);
+ d->arch.altp2m_eptp = NULL;
+ }
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ p2m_teardown(d->arch.altp2m_p2m[i]);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 3218fa2..6e1e9a5 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -37,6 +37,7 @@
#include <asm/hap.h>
#include <asm/paging.h>
#include <asm/p2m.h>
+#include <asm/altp2m.h>
#include <asm/domain.h>
#include <xen/numa.h>
#include <asm/hvm/nestedhvm.h>
@@ -501,24 +502,9 @@ int hap_enable(struct domain *d, u32 mode)
if ( hvm_altp2m_supported() )
{
- /* Init alternate p2m data */
- if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
- {
- rv = -ENOMEM;
- goto out;
- }
-
- for ( i = 0; i < MAX_EPTP; i++ )
- d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
-
- for ( i = 0; i < MAX_ALTP2M; i++ )
- {
- rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
- if ( rv != 0 )
- goto out;
- }
-
- d->arch.altp2m_active = 0;
+ rv = hvm_altp2m_init(d);
+ if ( rv != 0 )
+ goto out;
}
/* Now let other users see the new mode */
@@ -534,18 +520,7 @@ void hap_final_teardown(struct domain *d)
unsigned int i;
if ( hvm_altp2m_supported() )
- {
- d->arch.altp2m_active = 0;
-
- if ( d->arch.altp2m_eptp )
- {
- free_xenheap_page(d->arch.altp2m_eptp);
- d->arch.altp2m_eptp = NULL;
- }
-
- for ( i = 0; i < MAX_ALTP2M; i++ )
- p2m_teardown(d->arch.altp2m_p2m[i]);
- }
+ hvm_altp2m_teardown(d);
/* Destroy nestedp2m's first */
for (i = 0; i < MAX_NESTEDP2M; i++) {
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 13cab24..8247a02 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1329,6 +1329,45 @@ void setup_ept_dump(void)
register_keyhandler('D', ept_dump_p2m_table, "dump VT-x EPT tables", 0);
}
+void p2m_init_altp2m_ept_helper( struct domain *d, unsigned int i)
+{
+ struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+ struct ept_data *ept;
+
+ p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
+ p2m->max_remapped_gfn = 0;
+ ept = &p2m->ept;
+ ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+}
+
+unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
+{
+ struct p2m_domain *p2m;
+ struct ept_data *ept;
+ unsigned int i;
+
+ altp2m_list_lock(d);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ continue;
+
+ p2m = d->arch.altp2m_p2m[i];
+ ept = &p2m->ept;
+
+ if ( eptp == ept_get_eptp(ept) )
+ goto out;
+ }
+
+ i = INVALID_ALTP2M;
+
+ out:
+ altp2m_list_unlock(d);
+ return i;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ff0cce8..7673663 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -196,8 +196,8 @@ static void p2m_teardown_altp2m(struct domain *d)
if ( !d->arch.altp2m_p2m[i] )
continue;
p2m = d->arch.altp2m_p2m[i];
- d->arch.altp2m_p2m[i] = NULL;
p2m_free_one(p2m);
+ d->arch.altp2m_p2m[i] = NULL;
}
}
@@ -2278,33 +2278,6 @@ int unmap_mmio_regions(struct domain *d,
return i == nr ? 0 : i ?: ret;
}
-unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
-{
- struct p2m_domain *p2m;
- struct ept_data *ept;
- unsigned int i;
-
- altp2m_list_lock(d);
-
- for ( i = 0; i < MAX_ALTP2M; i++ )
- {
- if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
- continue;
-
- p2m = d->arch.altp2m_p2m[i];
- ept = &p2m->ept;
-
- if ( eptp == ept_get_eptp(ept) )
- goto out;
- }
-
- i = INVALID_ALTP2M;
-
- out:
- altp2m_list_unlock(d);
- return i;
-}
-
bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
{
struct domain *d = v->domain;
@@ -2410,18 +2383,6 @@ void p2m_flush_altp2m(struct domain *d)
altp2m_list_unlock(d);
}
-static void p2m_init_altp2m_helper(struct domain *d, unsigned int i)
-{
- struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
- struct ept_data *ept;
-
- p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
- p2m->max_remapped_gfn = 0;
- ept = &p2m->ept;
- ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
- d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
-}
-
int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
{
int rc = -EINVAL;
@@ -2433,7 +2394,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
{
- p2m_init_altp2m_helper(d, idx);
+ p2m_init_altp2m_ept_helper(d, idx);
rc = 0;
}
@@ -2453,7 +2414,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
continue;
- p2m_init_altp2m_helper(d, i);
+ p2m_init_altp2m_ept_helper(d, i);
*idx = i;
rc = 0;
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 64c7618..a236ed7 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -18,7 +18,6 @@
#ifndef __ASM_X86_ALTP2M_H
#define __ASM_X86_ALTP2M_H
-#include <xen/types.h>
#include <xen/sched.h> /* for struct vcpu, struct domain */
#include <asm/hvm/vcpu.h> /* for vcpu_altp2m */
@@ -38,4 +37,7 @@ static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
return vcpu_altp2m(v).p2midx;
}
+int hvm_altp2m_init(struct domain *d);
+void hvm_altp2m_teardown(struct domain *d);
+
#endif /* __ASM_X86_ALTP2M_H */
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 4cdd9b1..0383a9d 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -561,6 +561,9 @@ void ept_p2m_uninit(struct p2m_domain *p2m);
void ept_walk_table(struct domain *d, unsigned long gfn);
bool_t ept_handle_misconfig(uint64_t gpa);
void setup_ept_dump(void);
+void p2m_init_altp2m_ept_helper( struct domain *d, unsigned int i);
+/* Locate an alternate p2m by its EPTP */
+unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
void update_guest_eip(void);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d5fd546..0c5b391 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -23,8 +23,8 @@
* along with this program; If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef _XEN_P2M_H
-#define _XEN_P2M_H
+#ifndef _XEN_ASM_X86_P2M_H
+#define _XEN_ASM_X86_P2M_H
#include <xen/config.h>
#include <xen/paging.h>
@@ -781,9 +781,6 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
return v->domain->arch.altp2m_p2m[index];
}
-/* Locate an alternate p2m by its EPTP */
-unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
-
/* Switch alternate p2m for a single vcpu */
bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);
@@ -845,7 +842,7 @@ static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt)
return flags;
}
-#endif /* _XEN_P2M_H */
+#endif /* _XEN_ASM_X86_P2M_H */
/*
* Local variables:
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 Altp2m cleanup v3 3/3] Making altp2m struct dynamically allocated.
2016-08-19 17:22 [PATCH v2 Altp2m cleanup v3 0/3] Cleaning up altp2m code Paul Lai
2016-08-19 17:22 ` [PATCH v2 Altp2m cleanup v3 1/3] altp2m cleanup work Paul Lai
2016-08-19 17:22 ` [PATCH v2 Altp2m cleanup v3 2/3] Move altp2m specific functions to altp2m files Paul Lai
@ 2016-08-19 17:22 ` Paul Lai
2016-09-02 13:47 ` Jan Beulich
2 siblings, 1 reply; 11+ messages in thread
From: Paul Lai @ 2016-08-19 17:22 UTC (permalink / raw)
To: xen-devel; +Cc: ravi.sahita, george.dunlap, jbeulich
Use Ravi Sahita's dynamically allocated altp2m structs.
Signed-off-by: Paul Lai <paul.c.lai@intel.com>
Reviewed-by: Ravi Sahita <ravi.sahita@intel.com>
---
xen/arch/x86/hvm/hvm.c | 8 +++---
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
xen/arch/x86/mm/altp2m.c | 18 ++++++-------
xen/arch/x86/mm/mm-locks.h | 4 +--
xen/arch/x86/mm/p2m-ept.c | 10 ++++----
xen/arch/x86/mm/p2m.c | 61 ++++++++++++++++++++++++--------------------
xen/common/monitor.c | 1 +
xen/include/asm-x86/altp2m.h | 7 ++++-
xen/include/asm-x86/domain.h | 6 ++---
xen/include/asm-x86/p2m.h | 9 ++++++-
10 files changed, 72 insertions(+), 54 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1995fa4..115d8e7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5245,7 +5245,7 @@ static int do_altp2m_op(
if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
(a.cmd != HVMOP_altp2m_set_domain_state) &&
- !d->arch.altp2m_active )
+ !altp2m_active(d) )
{
rc = -EOPNOTSUPP;
goto out;
@@ -5279,11 +5279,11 @@ static int do_altp2m_op(
break;
}
- ostate = d->arch.altp2m_active;
- d->arch.altp2m_active = !!a.u.domain_state.state;
+ ostate = altp2m_active(d);
+ set_altp2m_active(d, !!a.u.domain_state.state);
/* If the alternate p2m state has changed, handle appropriately */
- if ( d->arch.altp2m_active != ostate &&
+ if ( altp2m_active(d) != ostate &&
(ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
{
for_each_vcpu( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3d330b6..e7c8d04 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2018,7 +2018,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
{
v->arch.hvm_vmx.secondary_exec_control |= mask;
__vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
- __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+ __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m->altp2m_eptp));
if ( cpu_has_vmx_virt_exceptions )
{
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index cc9b0fc..eec9ffc 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -73,23 +73,23 @@ hvm_altp2m_init( struct domain *d)
unsigned int i = 0;
/* Init alternate p2m data. */
- if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+ if ( (d->arch.altp2m->altp2m_eptp = alloc_xenheap_page()) == NULL )
{
rc = -ENOMEM;
goto out;
}
for ( i = 0; i < MAX_EPTP; i++ )
- d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+ d->arch.altp2m->altp2m_eptp[i] = mfn_x(INVALID_MFN);
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- rc = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+ rc = p2m_alloc_table(d->arch.altp2m->altp2m_p2m[i]);
if ( rc != 0 )
goto out;
}
- d->arch.altp2m_active = 0;
+ set_altp2m_active(d, 0);
out:
return rc;
}
@@ -98,16 +98,16 @@ void
hvm_altp2m_teardown( struct domain *d)
{
unsigned int i = 0;
- d->arch.altp2m_active = 0;
+ set_altp2m_active(d, 0);
- if ( d->arch.altp2m_eptp )
+ if ( d->arch.altp2m->altp2m_eptp )
{
- free_xenheap_page(d->arch.altp2m_eptp);
- d->arch.altp2m_eptp = NULL;
+ free_xenheap_page(d->arch.altp2m->altp2m_eptp);
+ d->arch.altp2m->altp2m_eptp = NULL;
}
for ( i = 0; i < MAX_ALTP2M; i++ )
- p2m_teardown(d->arch.altp2m_p2m[i]);
+ p2m_teardown(d->arch.altp2m->altp2m_p2m[i]);
}
/*
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..4d17b0a 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -251,8 +251,8 @@ declare_mm_rwlock(p2m);
*/
declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d) mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
-#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d) mm_lock(altp2mlist, &(d)->arch.altp2m->altp2m_list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m->altp2m_list_lock)
/* P2M lock (per-altp2m-table)
*
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 8247a02..74b2754 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1331,14 +1331,14 @@ void setup_ept_dump(void)
void p2m_init_altp2m_ept_helper( struct domain *d, unsigned int i)
{
- struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+ struct p2m_domain *p2m = d->arch.altp2m->altp2m_p2m[i];
struct ept_data *ept;
p2m->min_remapped_gfn = gfn_x(INVALID_GFN);
- p2m->max_remapped_gfn = 0;
+ p2m->max_remapped_gfn = gfn_x(_gfn(0UL));
ept = &p2m->ept;
ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
- d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+ d->arch.altp2m->altp2m_eptp[i] = ept_get_eptp(ept);
}
unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
@@ -1351,10 +1351,10 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = d->arch.altp2m->altp2m_p2m[i];
ept = &p2m->ept;
if ( eptp == ept_get_eptp(ept) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 7673663..956ce3a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -193,12 +193,15 @@ static void p2m_teardown_altp2m(struct domain *d)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( !d->arch.altp2m_p2m[i] )
+ if ( !d->arch.altp2m->altp2m_p2m[i] )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = d->arch.altp2m->altp2m_p2m[i];
p2m_free_one(p2m);
- d->arch.altp2m_p2m[i] = NULL;
+ d->arch.altp2m->altp2m_p2m[i] = NULL;
}
+
+ if ( d->arch.altp2m )
+ xfree(d->arch.altp2m);
}
static int p2m_init_altp2m(struct domain *d)
@@ -206,10 +209,14 @@ static int p2m_init_altp2m(struct domain *d)
unsigned int i;
struct p2m_domain *p2m;
- mm_lock_init(&d->arch.altp2m_list_lock);
+ d->arch.altp2m = xzalloc(struct altp2m_domain);
+ if ( d->arch.altp2m == NULL )
+ return -ENOMEM;
+
+ mm_lock_init(&d->arch.altp2m->altp2m_list_lock);
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+ d->arch.altp2m->altp2m_p2m[i] = p2m = p2m_init_one(d);
if ( p2m == NULL )
{
p2m_teardown_altp2m(d);
@@ -1844,10 +1851,10 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
if ( altp2m_idx )
{
if ( altp2m_idx >= MAX_ALTP2M ||
- d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+ d->arch.altp2m->altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
return -EINVAL;
- ap2m = d->arch.altp2m_p2m[altp2m_idx];
+ ap2m = d->arch.altp2m->altp2m_p2m[altp2m_idx];
}
switch ( access )
@@ -2288,7 +2295,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
if ( idx != vcpu_altp2m(v).p2midx )
{
@@ -2373,11 +2380,11 @@ void p2m_flush_altp2m(struct domain *d)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- p2m_flush_table(d->arch.altp2m_p2m[i]);
+ p2m_flush_table(d->arch.altp2m->altp2m_p2m[i]);
/* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(d->arch.altp2m_p2m[i]);
- ept_p2m_init(d->arch.altp2m_p2m[i]);
- d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
+ ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[i]);
+ ept_p2m_init(d->arch.altp2m->altp2m_p2m[i]);
+ d->arch.altp2m->altp2m_eptp[i] = mfn_x(INVALID_MFN);
}
altp2m_list_unlock(d);
@@ -2392,7 +2399,7 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
{
p2m_init_altp2m_ept_helper(d, idx);
rc = 0;
@@ -2411,7 +2418,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[i] != mfn_x(INVALID_MFN) )
continue;
p2m_init_altp2m_ept_helper(d, i);
@@ -2437,17 +2444,17 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
- p2m = d->arch.altp2m_p2m[idx];
+ p2m = d->arch.altp2m->altp2m_p2m[idx];
if ( !_atomic_read(p2m->active_vcpus) )
{
- p2m_flush_table(d->arch.altp2m_p2m[idx]);
+ p2m_flush_table(d->arch.altp2m->altp2m_p2m[idx]);
/* Uninit and reinit ept to force TLB shootdown */
- ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
- ept_p2m_init(d->arch.altp2m_p2m[idx]);
- d->arch.altp2m_eptp[idx] = mfn_x(INVALID_MFN);
+ ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[idx]);
+ ept_p2m_init(d->arch.altp2m->altp2m_p2m[idx]);
+ d->arch.altp2m->altp2m_eptp[idx] = mfn_x(INVALID_MFN);
rc = 0;
}
}
@@ -2471,7 +2478,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
altp2m_list_lock(d);
- if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
for_each_vcpu( d, v )
if ( idx != vcpu_altp2m(v).p2midx )
@@ -2502,11 +2509,11 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
unsigned int page_order;
int rc = -EINVAL;
- if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
+ if ( idx >= MAX_ALTP2M || d->arch.altp2m->altp2m_eptp[idx] == mfn_x(INVALID_MFN) )
return rc;
hp2m = p2m_get_hostp2m(d);
- ap2m = d->arch.altp2m_p2m[idx];
+ ap2m = d->arch.altp2m->altp2m_p2m[idx];
p2m_lock(ap2m);
@@ -2597,10 +2604,10 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
- if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ if ( d->arch.altp2m->altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = d->arch.altp2m->altp2m_p2m[i];
m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
/* Check for a dropped page that may impact this altp2m */
@@ -2621,10 +2628,10 @@ void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
for ( i = 0; i < MAX_ALTP2M; i++ )
{
if ( i == last_reset_idx ||
- d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
+ d->arch.altp2m->altp2m_eptp[i] == mfn_x(INVALID_MFN) )
continue;
- p2m = d->arch.altp2m_p2m[i];
+ p2m = d->arch.altp2m->altp2m_p2m[i];
p2m_lock(p2m);
p2m_reset_altp2m(p2m);
p2m_unlock(p2m);
diff --git a/xen/common/monitor.c b/xen/common/monitor.c
index c73d1d5..d6b3f90 100644
--- a/xen/common/monitor.c
+++ b/xen/common/monitor.c
@@ -24,6 +24,7 @@
#include <xen/sched.h>
#include <xen/vm_event.h>
#include <xsm/xsm.h>
+#include <asm/p2m.h>
#include <asm/altp2m.h>
#include <asm/monitor.h>
#include <asm/vm_event.h>
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index a236ed7..a2e6740 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -24,7 +24,12 @@
/* Alternate p2m HVM on/off per domain */
static inline bool_t altp2m_active(const struct domain *d)
{
- return d->arch.altp2m_active;
+ return d->arch.altp2m->altp2m_active;
+}
+
+static inline void set_altp2m_active(const struct domain *d, bool_t v)
+{
+ d->arch.altp2m->altp2m_active = v;
}
/* Alternate p2m VCPU */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 5807a1f..3b9fcc1 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -242,6 +242,7 @@ typedef xen_domctl_cpuid_t cpuid_input_t;
#define INVALID_ALTP2M 0xffff
#define MAX_EPTP (PAGE_SIZE / sizeof(uint64_t))
struct p2m_domain;
+struct altp2m_domain;
struct time_scale {
int shift;
u32 mul_frac;
@@ -320,10 +321,7 @@ struct arch_domain
mm_lock_t nested_p2m_lock;
/* altp2m: allow multiple copies of host p2m */
- bool_t altp2m_active;
- struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
- mm_lock_t altp2m_list_lock;
- uint64_t *altp2m_eptp;
+ struct altp2m_domain *altp2m;
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 0c5b391..d8c40a6 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -338,6 +338,13 @@ struct p2m_domain {
};
};
+struct altp2m_domain {
+ bool_t altp2m_active;
+ struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+ mm_lock_t altp2m_list_lock;
+ uint64_t *altp2m_eptp;
+};
+
/* get host p2m table */
#define p2m_get_hostp2m(d) ((d)->arch.p2m)
@@ -778,7 +785,7 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
BUG_ON(index >= MAX_ALTP2M);
- return v->domain->arch.altp2m_p2m[index];
+ return v->domain->arch.altp2m->altp2m_p2m[index];
}
/* Switch alternate p2m for a single vcpu */
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 11+ messages in thread