From: George Dunlap <george.dunlap@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: george.dunlap@eu.citrix.com
Subject: [PATCH 1 of 4] p2m: Keep statistics on order of p2m entries
Date: Fri, 6 May 2011 15:01:18 +0100
Message-ID: <be5d93d38f283329dea1.1304690478@elijah>
In-Reply-To: <patchbomb.1304690477@elijah>
Count the number of 4KiB, 2MiB, and 1GiB p2m entries, and report the totals from p2m_pod_dump_data().
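
The counters are indexed by page order divided by 9, so index 0 is a 4KiB
(L1) entry, index 1 a 2MiB (L2) entry, and index 2 a 1GiB (L3) entry. As a
rough illustration of the bookkeeping, here is a stand-alone sketch; the
names and helpers below are hypothetical, not the hypervisor code itself:

    /* Minimal model of the per-level entry counters kept in p2m->stats. */
    #include <stdio.h>

    static long entries[3];            /* [0]=4KiB, [1]=2MiB, [2]=1GiB */

    /* Called when a leaf entry of the given page order becomes valid. */
    static void account_entry(unsigned int page_order)
    {
        entries[page_order / 9]++;     /* order 0/9/18 -> index 0/1/2 */
    }

    /* Mirrors the stats output this patch adds to p2m_pod_dump_data(). */
    static void dump_entry_stats(void)
    {
        for ( int i = 0; i < 3; i++ )
            if ( entries[i] )
                printf("    L%d: %8ld entries, %ld bytes\n",
                       i + 1, entries[i], entries[i] << (i * 9 + 12));
    }

    int main(void)
    {
        account_entry(0);              /* one 4KiB entry */
        account_entry(9);              /* one 2MiB entry */
        account_entry(18);             /* one 1GiB entry */
        dump_entry_stats();
        return 0;
    }
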
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
diff -r 4b0692880dfa -r be5d93d38f28 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c Thu May 05 17:40:34 2011 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c Fri May 06 15:01:08 2011 +0100
@@ -39,6 +39,8 @@
#define is_epte_present(ept_entry) ((ept_entry)->epte & 0x7)
#define is_epte_superpage(ept_entry) ((ept_entry)->sp)
+#define is_epte_countable(ept_entry) (is_epte_present(ept_entry) \
+ || ((ept_entry)->sa_p2mt == p2m_populate_on_demand))
/* Non-ept "lock-and-check" wrapper */
static int ept_pod_check_and_populate(struct p2m_domain *p2m, unsigned long gfn,
@@ -167,11 +169,14 @@
void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int level)
{
/* End if the entry is a leaf entry. */
- if ( level == 0 || !is_epte_present(ept_entry) ||
- is_epte_superpage(ept_entry) )
+ if ( level == 0 || !is_epte_present(ept_entry) || is_epte_superpage(ept_entry) )
+ {
+ if ( is_epte_countable(ept_entry) )
+ p2m->stats.entries[level]--;
return;
+ }
- if ( level > 1 )
+ if ( level > 0 )
{
ept_entry_t *epte = map_domain_page(ept_entry->mfn);
for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -217,7 +222,10 @@
ept_p2m_type_to_flags(epte, epte->sa_p2mt, epte->access);
if ( (level - 1) == target )
+ {
+ p2m->stats.entries[target]++;
continue;
+ }
ASSERT(is_epte_superpage(epte));
@@ -400,6 +408,10 @@
ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
}
+ /* old_entry will be handled by ept_free_entry below */
+ if ( is_epte_countable(&new_entry) )
+ p2m->stats.entries[i]++;
+
atomic_write_ept_entry(ept_entry, new_entry);
}
else
@@ -412,12 +424,16 @@
split_ept_entry = atomic_read_ept_entry(ept_entry);
+ /* Accounting should be OK here: ept_split_super_page() bumps the counts,
+ * and ept_free_entry() will reduce them. */
if ( !ept_split_super_page(p2m, &split_ept_entry, i, target) )
{
ept_free_entry(p2m, &split_ept_entry, i);
goto out;
}
+ /* We know this was countable or we wouldn't be here. */
+ p2m->stats.entries[i]--;
/* now install the newly split ept sub-tree */
/* NB: please make sure domian is paused and no in-fly VT-d DMA. */
atomic_write_ept_entry(ept_entry, split_ept_entry);
@@ -449,9 +465,13 @@
ept_p2m_type_to_flags(&new_entry, p2mt, p2ma);
+ /* old_entry will be handled by ept_free_entry below */
+ if ( is_epte_countable(&new_entry) )
+ p2m->stats.entries[i]++;
+
atomic_write_ept_entry(ept_entry, new_entry);
}
-
+
/* Track the highest gfn for which we have ever had a valid mapping */
if ( mfn_valid(mfn_x(mfn)) &&
(gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
diff -r 4b0692880dfa -r be5d93d38f28 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu May 05 17:40:34 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Fri May 06 15:01:08 2011 +0100
@@ -184,11 +184,15 @@
{
/* End if the entry is a leaf entry. */
if ( page_order == 0
- || !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT)
+ || !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT)
|| (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
+ {
+ if ( l1e_get_flags(*p2m_entry) )
+ p2m->stats.entries[page_order/9]--;
return;
-
- if ( page_order > 9 )
+ }
+
+ if ( page_order )
{
l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
@@ -242,6 +246,7 @@
new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
__PAGE_HYPERVISOR | _PAGE_USER);
+ /* Stats: Empty entry, no mods needed */
switch ( type ) {
case PGT_l3_page_table:
p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
@@ -285,10 +290,12 @@
{
new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags);
p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
+ p2m->stats.entries[1]++;
p2m->write_p2m_entry(p2m, gfn,
l1_entry+i, *table_mfn, new_entry, 2);
}
unmap_domain_page(l1_entry);
+ p2m->stats.entries[2]--;
new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
__PAGE_HYPERVISOR|_PAGE_USER); //disable PSE
p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
@@ -320,6 +327,7 @@
{
new_entry = l1e_from_pfn(pfn + i, flags);
p2m_add_iommu_flags(&new_entry, 0, 0);
+ p2m->stats.entries[0]++;
p2m->write_p2m_entry(p2m, gfn,
l1_entry+i, *table_mfn, new_entry, 1);
}
@@ -328,6 +336,7 @@
new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
__PAGE_HYPERVISOR|_PAGE_USER);
p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
+ p2m->stats.entries[1]--;
p2m->write_p2m_entry(p2m, gfn,
p2m_entry, *table_mfn, new_entry, 2);
}
@@ -908,6 +917,15 @@
void
p2m_pod_dump_data(struct p2m_domain *p2m)
{
+ int i;
+ long entries;
+ printk(" P2M entry stats:\n");
+ for ( i=0; i<3; i++)
+ if ( (entries=p2m->stats.entries[i]) )
+ printk(" L%d: %8ld entries, %ld bytes\n",
+ i+1,
+ entries,
+ entries<<(i*9+12));
printk(" PoD entries=%d cachesize=%d\n",
p2m->pod.entry_count, p2m->pod.count);
}
@@ -1475,6 +1493,12 @@
old_mfn = l1e_get_pfn(*p2m_entry);
}
+ /* Adjust count for present/not-present entries added */
+ if ( l1e_get_flags(*p2m_entry) )
+ p2m->stats.entries[page_order/9]--;
+ if ( l1e_get_flags(entry_content) )
+ p2m->stats.entries[page_order/9]++;
+
p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 3);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1519,6 +1543,13 @@
p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
old_mfn = l1e_get_pfn(*p2m_entry);
}
+
+ /* Adjust count for present/not-present entries added */
+ if ( l1e_get_flags(*p2m_entry) )
+ p2m->stats.entries[page_order/9]--;
+ if ( l1e_get_flags(entry_content) )
+ p2m->stats.entries[page_order/9]++;
+
/* level 1 entry */
p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -1556,6 +1587,12 @@
old_mfn = l1e_get_pfn(*p2m_entry);
}
+ /* Adjust count for present/not-present entries added */
+ if ( l1e_get_flags(*p2m_entry) )
+ p2m->stats.entries[page_order/9]--;
+ if ( l1e_get_flags(entry_content) )
+ p2m->stats.entries[page_order/9]++;
+
p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 2);
/* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -2750,6 +2787,8 @@
continue;
}
+ /* STATS: this should only change the type; no stats adjustment needed */
+
l2mfn = _mfn(l3e_get_pfn(l3e[i3]));
l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
diff -r 4b0692880dfa -r be5d93d38f28 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu May 05 17:40:34 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Fri May 06 15:01:08 2011 +0100
@@ -278,6 +278,10 @@
unsigned reclaim_single; /* Last gpfn of a scan */
unsigned max_guest; /* gpfn of max guest demand-populate */
} pod;
+
+ struct {
+ long entries[3]; /* counts of valid entries: [0] 4KiB, [1] 2MiB, [2] 1GiB */
+ } stats;
};
/* get host p2m table */