From: George Dunlap <dunlapg@umich.edu>
To: xen-devel@lists.xensource.com
Subject: [PATCH] Trace p2m events
Date: Mon, 1 Feb 2010 10:57:16 -0800 [thread overview]
Message-ID: <de76405a1002011057i283ca924v9f56a8b8612de426@mail.gmail.com> (raw)
[-- Attachment #1: Type: text/plain, Size: 260 bytes --]
Add more tracing to aid in debugging ballooning / PoD:
* Nested page faults for EPT/NPT systems
* set_p2m_entry
* Decrease reservation (for ballooning)
* PoD populate, zero reclaim, superpage splinter
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
[-- Attachment #2: 20100107-unstable-p2m-trace.diff --]
[-- Type: text/x-patch, Size: 6659 bytes --]
diff -r cba56c13ca3e xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Jan 06 12:45:23 2010 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c Thu Jan 07 17:42:08 2010 +0000
@@ -886,6 +886,22 @@
mfn_t mfn;
p2m_type_t p2mt;
+ if ( tb_init_done )
+ {
+ struct {
+ uint64_t gpa;
+ uint64_t mfn;
+ u32 qualification;
+ u32 p2mt;
+ } _d;
+
+ _d.gpa = gpa;
+ _d.qualification = 0;
+ _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
+
+ __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+ }
+
if ( hvm_hap_nested_page_fault(gfn) )
return;
diff -r cba56c13ca3e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Jan 06 12:45:23 2010 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jan 07 17:42:08 2010 +0000
@@ -2099,6 +2099,22 @@
mfn_t mfn;
p2m_type_t p2mt;
+ if ( tb_init_done )
+ {
+ struct {
+ uint64_t gpa;
+ uint64_t mfn;
+ u32 qualification;
+ u32 p2mt;
+ } _d;
+
+ _d.gpa = gpa;
+ _d.qualification = qualification;
+ _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
+
+ __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+ }
+
if ( (qualification & EPT_GLA_VALID) &&
hvm_hap_nested_page_fault(gfn) )
return;
diff -r cba56c13ca3e xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Wed Jan 06 12:45:23 2010 +0000
+++ b/xen/arch/x86/mm/p2m.c Thu Jan 07 17:42:08 2010 +0000
@@ -829,6 +829,21 @@
goto out_reset;
}
+ if ( tb_init_done )
+ {
+ struct {
+ u64 gfn, mfn;
+ int d:16,order:16;
+ } t;
+
+ t.gfn = gfn;
+ t.mfn = mfn_x(mfn);
+ t.d = d->domain_id;
+ t.order = 9;
+
+ __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+ }
+
/* Finally! We've passed all the checks, and can add the mfn superpage
* back on the PoD cache, and account for the new p2m PoD entries */
p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
@@ -928,6 +943,21 @@
}
else
{
+ if ( tb_init_done )
+ {
+ struct {
+ u64 gfn, mfn;
+ int d:16,order:16;
+ } t;
+
+ t.gfn = gfns[i];
+ t.mfn = mfn_x(mfns[i]);
+ t.d = d->domain_id;
+ t.order = 0;
+
+ __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+ }
+
/* Add to cache, and account for the new p2m PoD entry */
p2m_pod_cache_add(d, mfn_to_page(mfns[i]), 0);
d->arch.p2m->pod.entry_count++;
@@ -1073,6 +1103,21 @@
p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
BUG_ON(p2md->pod.entry_count < 0);
+ if ( tb_init_done )
+ {
+ struct {
+ u64 gfn, mfn;
+ int d:16,order:16;
+ } t;
+
+ t.gfn = gfn;
+ t.mfn = mfn_x(mfn);
+ t.d = d->domain_id;
+ t.order = order;
+
+ __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
+ }
+
return 0;
out_of_memory:
spin_unlock(&d->page_alloc_lock);
@@ -1091,6 +1136,18 @@
for(i=0; i<(1<<order); i++)
set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
p2m_populate_on_demand);
+ if ( tb_init_done )
+ {
+ struct {
+ u64 gfn;
+ int d:16;
+ } t;
+
+ t.gfn = gfn;
+ t.d = d->domain_id;
+
+ __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t);
+ }
return 0;
}
@@ -1141,6 +1198,23 @@
l2_pgentry_t l2e_content;
int rv=0;
+ if ( tb_init_done )
+ {
+ struct {
+ u64 gfn, mfn;
+ int p2mt;
+ int d:16,order:16;
+ } t;
+
+ t.gfn = gfn;
+ t.mfn = mfn_x(mfn);
+ t.p2mt = p2mt;
+ t.d = d->domain_id;
+ t.order = page_order;
+
+ __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
+ }
+
#if CONFIG_PAGING_LEVELS >= 4
if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
L4_PAGETABLE_SHIFT - PAGE_SHIFT,
@@ -1225,7 +1299,7 @@
/* Success */
rv = 1;
- out:
+out:
unmap_domain_page(table);
return rv;
}
diff -r cba56c13ca3e xen/common/memory.c
--- a/xen/common/memory.c Wed Jan 06 12:45:23 2010 +0000
+++ b/xen/common/memory.c Thu Jan 07 17:42:08 2010 +0000
@@ -28,6 +28,7 @@
#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>
+#include <xen/trace.h>
struct memop_args {
/* INPUT */
@@ -222,6 +223,20 @@
if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
goto out;
+ if ( tb_init_done )
+ {
+ struct {
+ u64 gfn;
+ int d:16,order:16;
+ } t;
+
+ t.gfn = gmfn;
+ t.d = a->domain->domain_id;
+ t.order = a->extent_order;
+
+ __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t);
+ }
+
/* See if populate-on-demand wants to handle this */
if ( is_hvm_domain(a->domain)
&& p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
diff -r cba56c13ca3e xen/include/public/trace.h
--- a/xen/include/public/trace.h Wed Jan 06 12:45:23 2010 +0000
+++ b/xen/include/public/trace.h Thu Jan 07 17:42:08 2010 +0000
@@ -82,6 +82,12 @@
#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
#define TRC_PV_HYPERCALL (TRC_PV + 1)
#define TRC_PV_TRAP (TRC_PV + 3)
@@ -149,6 +155,8 @@
#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
#define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21)
+
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
reply other threads:[~2010-02-01 18:57 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=de76405a1002011057i283ca924v9f56a8b8612de426@mail.gmail.com \
--to=dunlapg@umich.edu \
--cc=xen-devel@lists.xensource.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).