From: Christoph Egger <Christoph.Egger@amd.com>
To: xen-devel@lists.xensource.com
Cc: Tim Deegan <Tim.Deegan@citrix.com>
Subject: [PATCH] p2m: move phystable into p2m
Date: Thu, 20 May 2010 11:18:59 +0200	[thread overview]
Message-ID: <201005201118.59894.Christoph.Egger@amd.com> (raw)

Hi Tim!

This patch moves phys_table from struct domain to struct p2m_domain.
This is a resend of the patch you acknowledged when you reviewed my
nested HVM patches. The difference is that I dropped the build
dependency on the p2m cleanup patch (the one you wanted benchmark
results for), so this patch now applies against xen-unstable.
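
For reference, every caller-side change in the patch follows the same
mechanical pattern. A minimal sketch (Xen-internal code, not meant to
compile on its own; v is any struct vcpu pointer):

    /* Before: the p2m root was a field of struct domain. */
    paddr_t pa_old = pagetable_get_paddr(v->domain->arch.phys_table);

    /* After: the root lives in struct p2m_domain and is reached
     * through the accessors added to xen/include/asm-x86/p2m.h. */
    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
    paddr_t pa_new = pagetable_get_paddr(p2m_get_pagetable(p2m));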

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>

P.S.: Keir: This patch is against c/s 21400, but it should apply
cleanly against the latest c/s since the p2m code hasn't changed.

-- 
---to satisfy European Law for business letters:
Advanced Micro Devices GmbH
Einsteinring 24, 85609 Dornach b. Muenchen
Managing Directors: Andrew Bowd, Thomas M. McCoy, Giuliano Meroni
Registered office: Dornach, municipality of Aschheim, district of Munich
Commercial register: Munich, HRB No. 43632


# HG changeset patch
# User cegger
# Date 1274087935 -7200
Move phys_table from struct domain to struct p2m_domain.
This prepares the p2m code to deal with multiple p2m tables per domain.
Multiple p2m tables are needed to use HAP with nested virtualization.
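
With the root inside struct p2m_domain, each p2m instance carries its
own table, so a later patch can give a domain several independent
instances. Illustrative sketch only; the nested instance below is
hypothetical and not part of this patch:

    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);  /* d->arch.p2m */
    pagetable_t root = p2m_get_pagetable(hostp2m);    /* phys_table  */

    /* Hypothetical follow-up for nested HVM: a second p2m instance,
     * obtained via some future lookup, resolving to its own root. */
    /* struct p2m_domain *np2m = <future nested-p2m lookup>; */
    /* pagetable_t nroot = p2m_get_pagetable(np2m); */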

diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -215,6 +215,7 @@ static int svm_vmcb_restore(struct vcpu 
     unsigned long mfn = 0;
     p2m_type_t p2mt;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
     if ( c->pending_valid &&
          ((c->pending_type == 1) || (c->pending_type > 6) ||
@@ -260,7 +261,7 @@ static int svm_vmcb_restore(struct vcpu 
     {
         vmcb->np_enable = 1;
         vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
-        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m));
     }
 
     if ( c->pending_valid ) 
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -26,7 +26,7 @@
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/paging.h>
+#include <asm/p2m.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/io.h>
 #include <asm/hvm/support.h>
@@ -232,7 +232,7 @@ static int construct_vmcb(struct vcpu *v
     {
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
-        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
+        vmcb->h_cr3 = pagetable_get_paddr(p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
 
         /* No point in intercepting CR3 reads/writes. */
         vmcb->cr_intercepts &= ~(CR_INTERCEPT_CR3_READ|CR_INTERCEPT_CR3_WRITE);
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -81,7 +81,7 @@ static int vmx_domain_initialise(struct 
     d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
     d->arch.hvm_domain.vmx.ept_control.gaw  = EPT_DEFAULT_GAW;
     d->arch.hvm_domain.vmx.ept_control.asr  =
-        pagetable_get_pfn(d->arch.phys_table);
+        pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
 
 
     if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -410,7 +410,7 @@ static void hap_install_xen_entries_in_l
 
     /* Install the domain-specific P2M table */
     l4e[l4_table_offset(RO_MPT_VIRT_START)] =
-        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
+        l4e_from_pfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))),
                      __PAGE_HYPERVISOR);
 
     hap_unmap_domain_page(l4e);
@@ -421,6 +421,7 @@ static void hap_install_xen_entries_in_l
 static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
 {
     struct domain *d = v->domain;
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
     l2_pgentry_t *l2e;
     l3_pgentry_t *p2m;
     int i;
@@ -446,8 +447,8 @@ static void hap_install_xen_entries_in_l
             l2e_empty();
 
     /* Install the domain-specific p2m table */
-    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
-    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+    ASSERT(pagetable_get_pfn(p2m_get_pagetable(hostp2m)) != 0);
+    p2m = hap_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(hostp2m)));
     for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
     {
         l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c
+++ b/xen/arch/x86/mm/hap/p2m-ept.c
@@ -242,12 +242,13 @@ ept_set_entry(struct domain *d, unsigned
     int direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t ipat = 0;
     int need_modify_vtd_table = 1;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     if (  order != 0 )
         if ( (gfn & ((1UL << order) - 1)) )
             return 1;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 
     ASSERT(table != NULL);
 
@@ -370,7 +371,7 @@ static mfn_t ept_get_entry(struct domain
                            p2m_query_t q)
 {
     ept_entry_t *table =
-        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     u32 index;
@@ -464,7 +465,7 @@ out:
 static ept_entry_t ept_get_entry_content(struct domain *d, unsigned long gfn, int *level)
 {
     ept_entry_t *table =
-        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     ept_entry_t content = { .epte = 0 };
@@ -499,7 +500,7 @@ static ept_entry_t ept_get_entry_content
 void ept_walk_table(struct domain *d, unsigned long gfn)
 {
     ept_entry_t *table =
-        map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     unsigned long gfn_remainder = gfn;
 
     int i;
@@ -639,12 +640,12 @@ static void ept_change_entry_type_global
     int i2;
     int i1;
 
-    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 )
         return;
 
     BUG_ON(EPT_DEFAULT_GAW != 3);
 
-    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
     for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ )
     {
         if ( !l4e[i4].epte )
@@ -739,12 +740,14 @@ static void ept_dump_p2m_table(unsigned 
     unsigned long index;
     unsigned long gfn, gfn_remainder;
     unsigned long record_counter = 0;
+    struct p2m_domain *p2m;
 
     for_each_domain(d)
     {
         if ( !(is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled) )
             continue;
 
+        p2m = p2m_get_hostp2m(d);
         printk("\ndomain%d EPT p2m table: \n", d->domain_id);
 
         for ( gfn = 0; gfn <= d->arch.p2m->max_mapped_pfn; gfn += (1 << order) )
@@ -752,7 +755,7 @@ static void ept_dump_p2m_table(unsigned 
             gfn_remainder = gfn;
             mfn = _mfn(INVALID_MFN);
             table =
-                map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+                map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 
             for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
             {
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1253,7 +1253,7 @@ p2m_set_entry(struct domain *d, unsigned
               unsigned int page_order, p2m_type_t p2mt)
 {
     // XXX -- this might be able to be faster iff current->domain == d
-    mfn_t table_mfn = pagetable_get_mfn(d->arch.phys_table);
+    mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
     void *table =map_domain_page(mfn_x(table_mfn));
     unsigned long i, gfn_remainder = gfn;
     l1_pgentry_t *p2m_entry;
@@ -1408,7 +1408,7 @@ p2m_gfn_to_mfn(struct domain *d, unsigne
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
 
-    mfn = pagetable_get_mfn(d->arch.phys_table);
+    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
 
     if ( gfn > d->arch.p2m->max_mapped_pfn )
         /* This pfn is higher than the highest the p2m map currently holds */
@@ -1798,11 +1798,11 @@ int p2m_alloc_table(struct domain *d,
     struct page_info *page, *p2m_top;
     unsigned int page_count = 0;
     unsigned long gfn = -1UL;
-    struct p2m_domain *p2m = d->arch.p2m;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     p2m_lock(p2m);
 
-    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
     {
         P2M_ERROR("p2m already allocated for this domain\n");
         p2m_unlock(p2m);
@@ -1828,7 +1828,7 @@ int p2m_alloc_table(struct domain *d,
         return -ENOMEM;
     }
 
-    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
 
     P2M_PRINTK("populating p2m table\n");
 
@@ -1872,7 +1872,7 @@ void p2m_teardown(struct domain *d)
  * We know we don't have any extra mappings to these pages */
 {
     struct page_info *pg;
-    struct p2m_domain *p2m = d->arch.p2m;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
     unsigned long gfn;
     p2m_type_t t;
     mfn_t mfn;
@@ -1884,7 +1884,7 @@ void p2m_teardown(struct domain *d)
         if(mfn_valid(mfn) && (t == p2m_ram_shared))
             BUG_ON(mem_sharing_unshare_page(d, gfn, MEM_SHARING_DESTROY_GFN));
     }
-    d->arch.phys_table = pagetable_null();
+    p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         p2m->free_page(d, pg);
@@ -1995,7 +1995,7 @@ static void audit_p2m(struct domain *d)
     spin_unlock(&d->page_alloc_lock);
 
     /* Audit part two: walk the domain's p2m table, checking the entries. */
-    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0 )
     {
         l2_pgentry_t *l2e;
         l1_pgentry_t *l1e;
@@ -2005,11 +2005,11 @@ static void audit_p2m(struct domain *d)
         l4_pgentry_t *l4e;
         l3_pgentry_t *l3e;
         int i3, i4;
-        l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
 #else /* CONFIG_PAGING_LEVELS == 3 */
         l3_pgentry_t *l3e;
         int i3;
-        l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
 #endif
 
         gfn = 0;
@@ -2421,22 +2421,23 @@ void p2m_change_type_global(struct domai
     l4_pgentry_t *l4e;
     unsigned long i4;
 #endif /* CONFIG_PAGING_LEVELS == 4 */
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
 
     if ( !paging_mode_translate(d) )
         return;
 
-    if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
+    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
         return;
 
     ASSERT(p2m_locked_by_me(d->arch.p2m));
 
 #if CONFIG_PAGING_LEVELS == 4
-    l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 #else /* CONFIG_PAGING_LEVELS == 3 */
-    l3mfn = _mfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
-    l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
+    l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
 #endif
 
 #if CONFIG_PAGING_LEVELS >= 4
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3177,7 +3177,7 @@ int shadow_enable(struct domain *d, u32 
  out_locked:
     shadow_unlock(d);
  out_unlocked:
-    if ( rv != 0 && !pagetable_is_null(d->arch.phys_table) )
+    if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m_get_hostp2m(d))) )
         p2m_teardown(d);
     if ( rv != 0 && pg != NULL )
         shadow_free_p2m_page(d, pg);
diff -r 90e8817dd642 -r d8fcc16ce408 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1476,7 +1476,7 @@ void sh_install_xen_entries_in_l4(struct
     {
         /* install domain-specific P2M table */
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
-            shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
+            shadow_l4e_from_mfn(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))),
                                 __PAGE_HYPERVISOR);
     }
 
@@ -1535,8 +1535,8 @@ static void sh_install_xen_entries_in_l2
     {
         /* Install the domain-specific p2m table */
         l3_pgentry_t *p2m;
-        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
-        p2m = sh_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
+        ASSERT(pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) != 0);
+        p2m = sh_map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))));
         for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
         {
             sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
diff -r 90e8817dd642 -r d8fcc16ce408 xen/include/asm-ia64/p2m_entry.h
--- a/xen/include/asm-ia64/p2m_entry.h
+++ b/xen/include/asm-ia64/p2m_entry.h
@@ -63,6 +63,8 @@ p2m_entry_retry(struct p2m_entry* entry)
 #endif
 }
 
+#define p2m_get_hostp2m(d) (d)
+
 #endif // __ASM_P2M_ENTRY_H__
 
 /*
diff -r 90e8817dd642 -r d8fcc16ce408 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -247,9 +247,6 @@ struct arch_domain
     struct paging_domain paging;
     struct p2m_domain *p2m;
 
-    /* Shadow translated domain: P2M mapping */
-    pagetable_t phys_table;
-
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     int *irq_pirq;
     int *pirq_irq;
diff -r 90e8817dd642 -r d8fcc16ce408 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -29,6 +29,7 @@
 #include <xen/config.h>
 #include <xen/paging.h>
 #include <asm/mem_sharing.h>
+#include <asm/page.h>    /* for pagetable_t */
 
 /*
  * The phys_to_machine_mapping maps guest physical frame numbers 
@@ -166,6 +167,9 @@ struct p2m_domain {
     int                locker;   /* processor which holds the lock */
     const char        *locker_function; /* Func that took it */
 
+    /* Shadow translated domain: p2m mapping */
+    pagetable_t        phys_table;
+
     /* Pages used to construct the p2m */
     struct page_list_head pages;
 
@@ -215,6 +219,11 @@ struct p2m_domain {
     } pod;
 };
 
+/* get host p2m table */
+#define p2m_get_hostp2m(d)      ((d)->arch.p2m)
+
+#define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
+
 /*
  * The P2M lock.  This protects all updates to the p2m table.
  * Updates are expected to be safe against concurrent reads,
