From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: Xen-devel@lists.xensource.com
Cc: keir.xen@gmail.com, tim@xen.org, JBeulich@suse.com
Subject: [V1 PATCH 05/11] PVH dom0: move some pv specific code to static functions
Date: Fri, 8 Nov 2013 17:23:30 -0800
Message-ID: <1383960215-22444-6-git-send-email-mukesh.rathor@oracle.com>
In-Reply-To: <1383960215-22444-1-git-send-email-mukesh.rathor@oracle.com>

As another preparatory patch, carve some PV-specific code in
construct_dom0() out into static functions: mark_pv_pt_pages_rdonly(),
which makes the pages backing the guest's page tables read only, and
setup_pv_physmap(), which sets up the phys->machine table when it is not
part of the initial mapping. No functional change.

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
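
Note for reviewers (below the cut line, so not part of the commit
message): the motivation for carving these helpers out is that the PVH
dom0 path, added later in this series, can skip the PV-only steps, since
a PVH guest's memory is managed through the p2m rather than through PV
page tables. For convenience, a minimal sketch of the resulting call
sites in construct_dom0(), lifted from the hunks below:

    if ( is_pv_domain(d) )
        mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);

    /* ... unrelated construct_dom0() logic elided ... */

    if ( is_pv_domain(d) && parms.p2m_base != UNSET_ADDR )
    {
        pfn = pagetable_get_pfn(v->arch.guest_table);
        setup_pv_physmap(d, pfn, v_start, v_end, vphysmap_start,
                         vphysmap_end, nr_pages);
    }
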
 xen/arch/x86/domain_build.c | 353 +++++++++++++++++++++++--------------------
 1 files changed, 192 insertions(+), 161 deletions(-)

diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 232adf8..c9ff680 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -307,6 +307,191 @@ static void __init process_dom0_ioports_disable(void)
     }
 }
 
+/* Pages that are part of page tables must be read only. */
+static __init void mark_pv_pt_pages_rdonly(struct domain *d,
+                                           l4_pgentry_t *l4start,
+                                           unsigned long vpt_start,
+                                           unsigned long nr_pt_pages)
+{
+    unsigned long count;
+    struct page_info *page;
+    l4_pgentry_t *pl4e;
+    l3_pgentry_t *pl3e;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+
+    pl4e = l4start + l4_table_offset(vpt_start);
+    pl3e = l4e_to_l3e(*pl4e);
+    pl3e += l3_table_offset(vpt_start);
+    pl2e = l3e_to_l2e(*pl3e);
+    pl2e += l2_table_offset(vpt_start);
+    pl1e = l2e_to_l1e(*pl2e);
+    pl1e += l1_table_offset(vpt_start);
+    for ( count = 0; count < nr_pt_pages; count++ )
+    {
+        l1e_remove_flags(*pl1e, _PAGE_RW);
+        page = mfn_to_page(l1e_get_pfn(*pl1e));
+
+        /* Read-only mapping + PGC_allocated + page-table page. */
+        page->count_info         = PGC_allocated | 3;
+        page->u.inuse.type_info |= PGT_validated | 1;
+
+        /* Top-level p.t. is pinned. */
+        if ( (page->u.inuse.type_info & PGT_type_mask) ==
+             (!is_pv_32on64_domain(d) ?
+              PGT_l4_page_table : PGT_l3_page_table) )
+        {
+            page->count_info        += 1;
+            page->u.inuse.type_info += 1 | PGT_pinned;
+        }
+
+        /* Iterate. */
+        if ( !((unsigned long)++pl1e & (PAGE_SIZE - 1)) )
+        {
+            if ( !((unsigned long)++pl2e & (PAGE_SIZE - 1)) )
+            {
+                if ( !((unsigned long)++pl3e & (PAGE_SIZE - 1)) )
+                    pl3e = l4e_to_l3e(*++pl4e);
+                pl2e = l3e_to_l2e(*pl3e);
+            }
+            pl1e = l2e_to_l1e(*pl2e);
+        }
+    }
+}
+
+/* Set up the phys->machine table if not part of the initial mapping. */
+static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
+                                    unsigned long v_start, unsigned long v_end,
+                                    unsigned long vphysmap_start,
+                                    unsigned long vphysmap_end,
+                                    unsigned long nr_pages)
+{
+    struct page_info *page = NULL;
+    l4_pgentry_t *pl4e = NULL, *l4start;
+    l3_pgentry_t *pl3e = NULL;
+    l2_pgentry_t *pl2e = NULL;
+    l1_pgentry_t *pl1e = NULL;
+
+    l4start = map_domain_page(pgtbl_pfn);
+
+    if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
+        panic("DOM0 P->M table overlaps initial mapping");
+
+    while ( vphysmap_start < vphysmap_end )
+    {
+        if ( d->tot_pages + ((round_pgup(vphysmap_end) - vphysmap_start)
+                             >> PAGE_SHIFT) + 3 > nr_pages )
+            panic("Dom0 allocation too small for initial P->M table.\n");
+
+        if ( pl1e )
+        {
+            unmap_domain_page(pl1e);
+            pl1e = NULL;
+        }
+        if ( pl2e )
+        {
+            unmap_domain_page(pl2e);
+            pl2e = NULL;
+        }
+        if ( pl3e )
+        {
+            unmap_domain_page(pl3e);
+            pl3e = NULL;
+        }
+        pl4e = l4start + l4_table_offset(vphysmap_start);
+        if ( !l4e_get_intpte(*pl4e) )
+        {
+            page = alloc_domheap_page(d, 0);
+            if ( !page )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l3_page_table | PGT_validated | 1;
+            pl3e = __map_domain_page(page);
+            clear_page(pl3e);
+            *pl4e = l4e_from_page(page, L4_PROT);
+        } else
+            pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+
+        pl3e += l3_table_offset(vphysmap_start);
+        if ( !l3e_get_intpte(*pl3e) )
+        {
+            if ( cpu_has_page1gb &&
+                 !(vphysmap_start & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
+                 vphysmap_end >= vphysmap_start + (1UL << L3_PAGETABLE_SHIFT) &&
+                 (page = alloc_domheap_pages(d,
+                                             L3_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                             0)) != NULL )
+            {
+                *pl3e = l3e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+                vphysmap_start += 1UL << L3_PAGETABLE_SHIFT;
+                continue;
+            }
+            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l2_page_table | PGT_validated | 1;
+            pl2e = __map_domain_page(page);
+            clear_page(pl2e);
+            *pl3e = l3e_from_page(page, L3_PROT);
+        }
+        else
+            pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+
+        pl2e += l2_table_offset(vphysmap_start);
+        if ( !l2e_get_intpte(*pl2e) )
+        {
+            if ( !(vphysmap_start & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
+                 vphysmap_end >= vphysmap_start + (1UL << L2_PAGETABLE_SHIFT) &&
+                 (page = alloc_domheap_pages(d,
+                                             L2_PAGETABLE_SHIFT - PAGE_SHIFT,
+                                             0)) != NULL )
+            {
+                *pl2e = l2e_from_page(page, L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
+                if ( opt_allow_superpage )
+                    get_superpage(page_to_mfn(page), d);
+                vphysmap_start += 1UL << L2_PAGETABLE_SHIFT;
+                continue;
+            }
+            if ( (page = alloc_domheap_page(d, 0)) == NULL )
+                break;
+
+            /* No mapping, PGC_allocated + page-table page. */
+            page->count_info = PGC_allocated | 2;
+            page->u.inuse.type_info = PGT_l1_page_table | PGT_validated | 1;
+            pl1e = __map_domain_page(page);
+            clear_page(pl1e);
+            *pl2e = l2e_from_page(page, L2_PROT);
+        }
+        else
+            pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+
+        pl1e += l1_table_offset(vphysmap_start);
+        BUG_ON(l1e_get_intpte(*pl1e));
+        page = alloc_domheap_page(d, 0);
+        if ( !page )
+            break;
+
+        *pl1e = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
+        vphysmap_start += PAGE_SIZE;
+        vphysmap_start &= PAGE_MASK;
+    }
+    if ( !page )
+        panic("Not enough RAM for DOM0 P->M table.\n");
+
+    if ( pl1e )
+        unmap_domain_page(pl1e);
+    if ( pl2e )
+        unmap_domain_page(pl2e);
+    if ( pl3e )
+        unmap_domain_page(pl3e);
+
+    unmap_domain_page(l4start);
+}
+
 int __init construct_dom0(
     struct domain *d,
     const module_t *image, unsigned long image_headroom,
@@ -705,44 +890,8 @@ int __init construct_dom0(
                COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*l2tab));
     }
 
-    /* Pages that are part of page tables must be read only. */
-    l4tab = l4start + l4_table_offset(vpt_start);
-    l3start = l3tab = l4e_to_l3e(*l4tab);
-    l3tab += l3_table_offset(vpt_start);
-    l2start = l2tab = l3e_to_l2e(*l3tab);
-    l2tab += l2_table_offset(vpt_start);
-    l1start = l1tab = l2e_to_l1e(*l2tab);
-    l1tab += l1_table_offset(vpt_start);
-    for ( count = 0; count < nr_pt_pages; count++ )
-    {
-        l1e_remove_flags(*l1tab, _PAGE_RW);
-        page = mfn_to_page(l1e_get_pfn(*l1tab));
-
-        /* Read-only mapping + PGC_allocated + page-table page. */
-        page->count_info         = PGC_allocated | 3;
-        page->u.inuse.type_info |= PGT_validated | 1;
-
-        /* Top-level p.t. is pinned. */
-        if ( (page->u.inuse.type_info & PGT_type_mask) ==
-             (!is_pv_32on64_domain(d) ?
-              PGT_l4_page_table : PGT_l3_page_table) )
-        {
-            page->count_info        += 1;
-            page->u.inuse.type_info += 1 | PGT_pinned;
-        }
-
-        /* Iterate. */
-        if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
-        {
-            if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
-            {
-                if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
-                    l3start = l3tab = l4e_to_l3e(*++l4tab);
-                l2start = l2tab = l3e_to_l2e(*l3tab);
-            }
-            l1start = l1tab = l2e_to_l1e(*l2tab);
-        }
-    }
+    if ( is_pv_domain(d) )
+        mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
 
     /* Mask all upcalls... */
     for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
@@ -814,132 +963,14 @@ int __init construct_dom0(
              elf_64bit(&elf) ? 64 : 32, parms.pae ? "p" : "");
 
     count = d->tot_pages;
-    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
-    l3tab = NULL;
-    l2tab = NULL;
-    l1tab = NULL;
-    /* Set up the phys->machine table if not part of the initial mapping. */
-    if ( parms.p2m_base != UNSET_ADDR )
-    {
-        unsigned long va = vphysmap_start;
-        if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
-            panic("DOM0 P->M table overlaps initial mapping");
-
-        while ( va < vphysmap_end )
-        {
-            if ( d->tot_pages + ((round_pgup(vphysmap_end) - va)
-                                 >> PAGE_SHIFT) + 3 > nr_pages )
-                panic("Dom0 allocation too small for initial P->M table.\n");
-
-            if ( l1tab )
-            {
-                unmap_domain_page(l1tab);
-                l1tab = NULL;
-            }
-            if ( l2tab )
-            {
-                unmap_domain_page(l2tab);
-                l2tab = NULL;
-            }
-            if ( l3tab )
-            {
-                unmap_domain_page(l3tab);
-                l3tab = NULL;
-            }
-            l4tab = l4start + l4_table_offset(va);
-            if ( !l4e_get_intpte(*l4tab) )
-            {
-                page = alloc_domheap_page(d, 0);
-                if ( !page )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l3_page_table | PGT_validated | 1;
-                l3tab = __map_domain_page(page);
-                clear_page(l3tab);
-                *l4tab = l4e_from_page(page, L4_PROT);
-            } else
-                l3tab = map_domain_page(l4e_get_pfn(*l4tab));
-            l3tab += l3_table_offset(va);
-            if ( !l3e_get_intpte(*l3tab) )
-            {
-                if ( cpu_has_page1gb &&
-                     !(va & ((1UL << L3_PAGETABLE_SHIFT) - 1)) &&
-                     vphysmap_end >= va + (1UL << L3_PAGETABLE_SHIFT) &&
-                     (page = alloc_domheap_pages(d,
-                                                 L3_PAGETABLE_SHIFT -
-                                                 PAGE_SHIFT,
-                                                 0)) != NULL )
-                {
-                    *l3tab = l3e_from_page(page,
-                                           L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
-                    va += 1UL << L3_PAGETABLE_SHIFT;
-                    continue;
-                }
-                if ( (page = alloc_domheap_page(d, 0)) == NULL )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l2_page_table | PGT_validated | 1;
-                l2tab = __map_domain_page(page);
-                clear_page(l2tab);
-                *l3tab = l3e_from_page(page, L3_PROT);
-            }
-            else
-                l2tab = map_domain_page(l3e_get_pfn(*l3tab));
-            l2tab += l2_table_offset(va);
-            if ( !l2e_get_intpte(*l2tab) )
-            {
-                if ( !(va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) &&
-                     vphysmap_end >= va + (1UL << L2_PAGETABLE_SHIFT) &&
-                     (page = alloc_domheap_pages(d,
-                                                 L2_PAGETABLE_SHIFT -
-                                                 PAGE_SHIFT,
-                                                 0)) != NULL )
-                {
-                    *l2tab = l2e_from_page(page,
-                                           L1_PROT|_PAGE_DIRTY|_PAGE_PSE);
-                    if ( opt_allow_superpage )
-                        get_superpage(page_to_mfn(page), d);
-                    va += 1UL << L2_PAGETABLE_SHIFT;
-                    continue;
-                }
-                if ( (page = alloc_domheap_page(d, 0)) == NULL )
-                    break;
-                /* No mapping, PGC_allocated + page-table page. */
-                page->count_info = PGC_allocated | 2;
-                page->u.inuse.type_info =
-                    PGT_l1_page_table | PGT_validated | 1;
-                l1tab = __map_domain_page(page);
-                clear_page(l1tab);
-                *l2tab = l2e_from_page(page, L2_PROT);
-            }
-            else
-                l1tab = map_domain_page(l2e_get_pfn(*l2tab));
-            l1tab += l1_table_offset(va);
-            BUG_ON(l1e_get_intpte(*l1tab));
-            page = alloc_domheap_page(d, 0);
-            if ( !page )
-                break;
-            *l1tab = l1e_from_page(page, L1_PROT|_PAGE_DIRTY);
-            va += PAGE_SIZE;
-            va &= PAGE_MASK;
-        }
-        if ( !page )
-            panic("Not enough RAM for DOM0 P->M table.\n");
 
+    if ( is_pv_domain(d) && parms.p2m_base != UNSET_ADDR )
+    {
+        pfn = pagetable_get_pfn(v->arch.guest_table);
+        setup_pv_physmap(d, pfn, v_start, v_end, vphysmap_start, vphysmap_end,
+                         nr_pages);
     }
 
-    if ( l1tab )
-        unmap_domain_page(l1tab);
-    if ( l2tab )
-        unmap_domain_page(l2tab);
-    if ( l3tab )
-        unmap_domain_page(l3tab);
-    unmap_domain_page(l4start);
-
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
     {
-- 
1.7.2.3