From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: Xen-devel@lists.xensource.com
Cc: keir.xen@gmail.com, JBeulich@suse.com
Subject: [RFC 0 PATCH 2/3] PVH dom0: move some pv specific code to static functions
Date: Wed, 25 Sep 2013 14:03:07 -0700
Message-ID: <1380142988-9487-3-git-send-email-mukesh.rathor@oracle.com>
In-Reply-To: <1380142988-9487-1-git-send-email-mukesh.rathor@oracle.com>
This preparatory patch also carves some PV-specific code out of
construct_dom0() into static functions: mark_pv_pt_pages_rdonly() and
setup_pv_p2m_table(). No functional change.
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
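Reviewer note: the moved code is taken over essentially unchanged (only
parms.p2m_base becomes parms->p2m_base in the second helper, since parms
is now passed by pointer), so the two call sites in construct_dom0()
reduce to the following, condensed from the hunks below:

    /* PV page-table pages must be mapped read-only. */
    if ( is_pv_domain(d) )
        mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);

    /* Set up the phys->machine table if not part of the initial mapping. */
    if ( is_pv_domain(d) )
        setup_pv_p2m_table(d, v, &parms, v_start, vphysmap_start,
                           vphysmap_end, v_end, nr_pages);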
xen/arch/x86/domain_build.c | 358 +++++++++++++++++++++++-------------------
1 files changed, 196 insertions(+), 162 deletions(-)
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 232adf8..5125aa2 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -307,6 +307,197 @@ static void __init process_dom0_ioports_disable(void)
}
}
+static __init void mark_pv_pt_pages_rdonly(struct domain *d,
+ l4_pgentry_t *l4start,
+ unsigned long vpt_start,
+ unsigned long nr_pt_pages)
+{
+ unsigned long count;
+ struct page_info *page;
+ l4_pgentry_t *l4tab;
+ l3_pgentry_t *l3tab, *l3start;
+ l2_pgentry_t *l2tab, *l2start;
+ l1_pgentry_t *l1tab, *l1start;
+
+ /* Pages that are part of page tables must be read only. */
+ l4tab = l4start + l4_table_offset(vpt_start);
+ l3start = l3tab = l4e_to_l3e(*l4tab);
+ l3tab += l3_table_offset(vpt_start);
+ l2start = l2tab = l3e_to_l2e(*l3tab);
+ l2tab += l2_table_offset(vpt_start);
+ l1start = l1tab = l2e_to_l1e(*l2tab);
+ l1tab += l1_table_offset(vpt_start);
+ for ( count = 0; count < nr_pt_pages; count++ )
+ {
+ l1e_remove_flags(*l1tab, _PAGE_RW);
+ page = mfn_to_page(l1e_get_pfn(*l1tab));
+
+ /* Read-only mapping + PGC_allocated + page-table page. */
+ page->count_info = PGC_allocated | 3;
+ page->u.inuse.type_info |= PGT_validated | 1;
+
+ /* Top-level p.t. is pinned. */
+ if ( (page->u.inuse.type_info & PGT_type_mask) ==
+ (!is_pv_32on64_domain(d) ?
+ PGT_l4_page_table : PGT_l3_page_table) )
+ {
+ page->count_info += 1;
+ page->u.inuse.type_info += 1 | PGT_pinned;
+ }
+
+ /* Iterate. */
+ if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
+ {
+ if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
+ {
+ if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
+ l3start = l3tab = l4e_to_l3e(*++l4tab);
+ l2start = l2tab = l3e_to_l2e(*l3tab);
+ }
+ l1start = l1tab = l2e_to_l1e(*l2tab);
+ }
+ }
+}
+
+static __init void setup_pv_p2m_table(
+ struct domain *d, struct vcpu *v, struct elf_dom_parms *parms,
+ unsigned long v_start, unsigned long vphysmap_start,
+ unsigned long vphysmap_end, unsigned long v_end, unsigned long nr_pages)
+{
+ struct page_info *page = NULL;
+ l4_pgentry_t *l4tab = NULL, *l4start = NULL;
+ l3_pgentry_t *l3tab = NULL;
+ l2_pgentry_t *l2tab = NULL;
+ l1_pgentry_t *l1tab = NULL;
+
+ l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+ l3tab = NULL;
+ l2tab = NULL;
+ l1tab = NULL;
+
+ /* Set up the phys->machine table if not part of the initial mapping. */
+ if ( parms->p2m_base != UNSET_ADDR )
+ {
+ unsigned long va = vphysmap_start;
+
+ if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
+ panic("DOM0 P->M table overlaps initial mapping");
+
+ while ( va < vphysmap_end )
+ {
+ if ( d->tot_pages + ((round_pgup(vphysmap_end) - va)
+ >> PAGE_SHIFT) + 3 > nr_pages )
+ panic("Dom0 allocation too small for initial P->M table.\n");
+
+ if ( l1tab )
+ {
+ unmap_domain_page(l1tab);
+ l1tab = NULL;
+ }
+ if ( l2tab )
+ {
+ unmap_domain_page(l2tab);
+ l2tab = NULL;
+ }
+ if ( l3tab )
+ {
+ unmap_domain_page(l3tab);
+ l3tab = NULL;
+ }
+ l4tab = l4start + l4_table_offset(va);
+ if ( !l4e_get_intpte(*l4tab) )
+ {
+ page = alloc_domheap_page(d, 0);
+ if ( !page )
+ break;
+ /* No mapping, PGC_allocated + page-table page. */
+ page->count_info = PGC_allocated | 2;
+ page->u.inuse.type_info =
+ PGT_l3_page_table | PGT_validated | 1;
+ l3tab = __map_domain_page(page);
+ clear_page(l3tab);
+ *l4tab = l4e_from_page(page, L4_PROT);
+ } else
+ l3tab = map_domain_page(l4e_get_pfn(*l4tab));
+ l3tab += l3_table_offset(va);
+ if ( !l3e_get_intpte(*l3tab) )
+ {
+ if ( cpu_has_page1gb &&
+ !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
+ vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) &&
+ (page = alloc_domheap_pages(d,
+ L3_PAGETABLE_SHIFT -
+ PAGE_SHIFT,
+ 0)) != NULL )
+ {
+ *l3tab = l3e_from_page(page,
+ L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+ va += 1UL << L3_PAGETABLE_SHIFT;
+ continue;
+ }
+ if ( (page = alloc_domheap_page(d, 0)) == NULL )
+ break;
+ /* No mapping, PGC_allocated + page-table page. */
+ page->count_info = PGC_allocated | 2;
+ page->u.inuse.type_info =
+ PGT_l2_page_table | PGT_validated | 1;
+ l2tab = __map_domain_page(page);
+ clear_page(l2tab);
+ *l3tab = l3e_from_page(page, L3_PROT);
+ }
+ else
+ l2tab = map_domain_page(l3e_get_pfn(*l3tab));
+ l2tab += l2_table_offset(va);
+ if ( !l2e_get_intpte(*l2tab) )
+ {
+ if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
+ vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) &&
+ (page = alloc_domheap_pages(d,
+ L2_PAGETABLE_SHIFT -
+ PAGE_SHIFT,
+ 0)) != NULL )
+ {
+ *l2tab = l2e_from_page(page,
+ L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+ if ( opt_allow_superpage )
+ get_superpage(page_to_mfn(page), d);
+ va += 1UL << L2_PAGETABLE_SHIFT;
+ continue;
+ }
+ if ( (page = alloc_domheap_page(d, 0)) == NULL )
+ break;
+ /* No mapping, PGC_allocated + page-table page. */
+ page->count_info = PGC_allocated | 2;
+ page->u.inuse.type_info =
+ PGT_l1_page_table | PGT_validated | 1;
+ l1tab = __map_domain_page(page);
+ clear_page(l1tab);
+ *l2tab = l2e_from_page(page, L2_PROT);
+ }
+ else
+ l1tab = map_domain_page(l2e_get_pfn(*l2tab));
+ l1tab += l1_table_offset(va);
+ BUG_ON(l1e_get_intpte(*l1tab));
+ page = alloc_domheap_page(d, 0);
+ if ( !page )
+ break;
+ *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
+ va += PAGE_SIZE;
+ va &= PAGE_MASK;
+ }
+ if ( !page )
+ panic("Not enough RAM for DOM0 P->M table.\n");
+ }
+
+ if ( l1tab )
+ unmap_domain_page(l1tab);
+ if ( l2tab )
+ unmap_domain_page(l2tab);
+ if ( l3tab )
+ unmap_domain_page(l3tab);
+ unmap_domain_page(l4start);
+}
+
int __init construct_dom0(
struct domain *d,
const module_t *image, unsigned long image_headroom,
@@ -705,44 +896,8 @@ int __init construct_dom0(
COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2tab));
}
- /* Pages that are part of page tables must be read only. */
- l4tab = l4start + l4_table_offset(vpt_start);
- l3start = l3tab = l4e_to_l3e(*l4tab);
- l3tab += l3_table_offset(vpt_start);
- l2start = l2tab = l3e_to_l2e(*l3tab);
- l2tab += l2_table_offset(vpt_start);
- l1start = l1tab = l2e_to_l1e(*l2tab);
- l1tab += l1_table_offset(vpt_start);
- for ( count = 0; count < nr_pt_pages; count++ )
- {
- l1e_remove_flags(*l1tab, _PAGE_RW);
- page = mfn_to_page(l1e_get_pfn(*l1tab));
-
- /* Read-only mapping + PGC_allocated + page-table page. */
- page->count_info = PGC_allocated | 3;
- page->u.inuse.type_info |= PGT_validated | 1;
-
- /* Top-level p.t. is pinned. */
- if ( (page->u.inuse.type_info & PGT_type_mask) ==
- (!is_pv_32on64_domain(d) ?
- PGT_l4_page_table : PGT_l3_page_table) )
- {
- page->count_info += 1;
- page->u.inuse.type_info += 1 | PGT_pinned;
- }
-
- /* Iterate. */
- if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
- {
- if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
- {
- if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
- l3start = l3tab = l4e_to_l3e(*++l4tab);
- l2start = l2tab = l3e_to_l2e(*l3tab);
- }
- l1start = l1tab = l2e_to_l1e(*l2tab);
- }
- }
+ if ( is_pv_domain(d) )
+ mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
/* Mask all upcalls... */
for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
@@ -814,131 +969,10 @@ int __init construct_dom0(
elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
count = d->tot_pages;
- l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
- l3tab = NULL;
- l2tab = NULL;
- l1tab = NULL;
- /* Set up the phys->machine table if not part of the initial mapping. */
- if ( parms.p2m_base != UNSET_ADDR )
- {
- unsigned long va = vphysmap_start;
- if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
- panic("DOM0 P->M table overlaps initial mapping");
-
- while ( va < vphysmap_end )
- {
- if ( d->tot_pages + ((round_pgup(vphysmap_end) - va)
- >> PAGE_SHIFT) + 3 > nr_pages )
- panic("Dom0 allocation too small for initial P->M table.\n");
-
- if ( l1tab )
- {
- unmap_domain_page(l1tab);
- l1tab = NULL;
- }
- if ( l2tab )
- {
- unmap_domain_page(l2tab);
- l2tab = NULL;
- }
- if ( l3tab )
- {
- unmap_domain_page(l3tab);
- l3tab = NULL;
- }
- l4tab = l4start + l4_table_offset(va);
- if ( !l4e_get_intpte(*l4tab) )
- {
- page = alloc_domheap_page(d, 0);
- if ( !page )
- break;
- /* No mapping, PGC_allocated + page-table page. */
- page->count_info = PGC_allocated | 2;
- page->u.inuse.type_info =
- PGT_l3_page_table | PGT_validated | 1;
- l3tab = __map_domain_page(page);
- clear_page(l3tab);
- *l4tab = l4e_from_page(page, L4_PROT);
- } else
- l3tab = map_domain_page(l4e_get_pfn(*l4tab));
- l3tab += l3_table_offset(va);
- if ( !l3e_get_intpte(*l3tab) )
- {
- if ( cpu_has_page1gb &&
- !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
- vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) &&
- (page = alloc_domheap_pages(d,
- L3_PAGETABLE_SHIFT -
- PAGE_SHIFT,
- 0)) != NULL )
- {
- *l3tab = l3e_from_page(page,
- L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
- va += 1UL << L3_PAGETABLE_SHIFT;
- continue;
- }
- if ( (page = alloc_domheap_page(d, 0)) == NULL )
- break;
- /* No mapping, PGC_allocated + page-table page. */
- page->count_info = PGC_allocated | 2;
- page->u.inuse.type_info =
- PGT_l2_page_table | PGT_validated | 1;
- l2tab = __map_domain_page(page);
- clear_page(l2tab);
- *l3tab = l3e_from_page(page, L3_PROT);
- }
- else
- l2tab = map_domain_page(l3e_get_pfn(*l3tab));
- l2tab += l2_table_offset(va);
- if ( !l2e_get_intpte(*l2tab) )
- {
- if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
- vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) &&
- (page = alloc_domheap_pages(d,
- L2_PAGETABLE_SHIFT -
- PAGE_SHIFT,
- 0)) != NULL )
- {
- *l2tab = l2e_from_page(page,
- L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
- if ( opt_allow_superpage )
- get_superpage(page_to_mfn(page), d);
- va += 1UL << L2_PAGETABLE_SHIFT;
- continue;
- }
- if ( (page = alloc_domheap_page(d, 0)) == NULL )
- break;
- /* No mapping, PGC_allocated + page-table page. */
- page->count_info = PGC_allocated | 2;
- page->u.inuse.type_info =
- PGT_l1_page_table | PGT_validated | 1;
- l1tab = __map_domain_page(page);
- clear_page(l1tab);
- *l2tab = l2e_from_page(page, L2_PROT);
- }
- else
- l1tab = map_domain_page(l2e_get_pfn(*l2tab));
- l1tab += l1_table_offset(va);
- BUG_ON(l1e_get_intpte(*l1tab));
- page = alloc_domheap_page(d, 0);
- if ( !page )
- break;
- *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
- va += PAGE_SIZE;
- va &= PAGE_MASK;
- }
- if ( !page )
- panic("Not enough RAM for DOM0 P->M table.\n");
- }
-
- if ( l1tab )
- unmap_domain_page(l1tab);
- if ( l2tab )
- unmap_domain_page(l2tab);
- if ( l3tab )
- unmap_domain_page(l3tab);
- unmap_domain_page(l4start);
+ if ( is_pv_domain(d) )
+ setup_pv_p2m_table(d, v, &parms, v_start, vphysmap_start,
+ vphysmap_end, v_end, nr_pages);
/* Write the phys->machine and machine->phys table entries. */
for ( pfn = 0; pfn < count; pfn++ )
--
1.7.2.3