From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
	George.Dunlap@eu.citrix.com, andrew.cooper3@citrix.com,
	ian.jackson@eu.citrix.com, tim@xen.org, jbeulich@suse.com,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>
Subject: [PATCH v2 9/9] mm: Make sure pages are scrubbed
Date: Mon,  3 Apr 2017 12:50:56 -0400
Message-ID: <1491238256-5517-10-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1491238256-5517-1-git-send-email-boris.ostrovsky@oracle.com>

Add a debug Kconfig option that makes the page allocator verify that
pages which were supposed to be scrubbed are, in fact, clean.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v2:
* Check full page for having been scrubbed

Note that poison_one_page() may be unnecessary since the chances that a dirty
page starts with 0xc2c2c2c2c2c2c2c2 are fairly low.
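
For illustration only, a rough standalone sketch of the invariant the patch
enforces (this is not Xen code; poison_page()/scrub_page()/check_page() and
the fixed 4 KiB page size are made-up names for the sketch): a page freed
dirty gets its first word poisoned with the inverse pattern, the debug scrub
fills the whole page with the pattern, and the check asserts every 64-bit
word matches before the page would be handed to a guest:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define FAKE_PAGE_SIZE 4096
    #define SCRUB_PATTERN  0xc2c2c2c2c2c2c2c2ULL

    /* Poison only the first word, as poison_one_page() does. */
    static void poison_page(uint64_t *page)
    {
        page[0] = ~SCRUB_PATTERN;
    }

    /* Debug scrub fills the page with the pattern rather than zeroes. */
    static void scrub_page(uint64_t *page)
    {
        memset(page, (int)(SCRUB_PATTERN & 0xff), FAKE_PAGE_SIZE);
    }

    /* v2 change: verify the full page, not just the first word. */
    static void check_page(const uint64_t *page)
    {
        unsigned int i;

        for ( i = 0; i < FAKE_PAGE_SIZE / sizeof(*page); i++ )
            assert(page[i] == SCRUB_PATTERN);
    }

    int main(void)
    {
        static uint64_t page[FAKE_PAGE_SIZE / sizeof(uint64_t)];

        poison_page(page);  /* freed dirty */
        scrub_page(page);   /* scrubbed before allocation */
        check_page(page);   /* would assert if scrub_page() were skipped */
        return 0;
    }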

 xen/Kconfig.debug       |    7 ++++++
 xen/common/page_alloc.c |   49 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 55 insertions(+), 1 deletions(-)

diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 689f297..f3bf9a9 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -114,6 +114,13 @@ config DEVICE_TREE_DEBUG
 	  logged in the Xen ring buffer.
 	  If unsure, say N here.
 
+config SCRUB_DEBUG
+    bool "Page scrubbing test"
+    default DEBUG
+    ---help---
+      Verify that pages that need to be scrubbed before being allocated to
+      a guest are indeed scrubbed.
+
 endif # DEBUG || EXPERT
 
 endmenu
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 8273102..b82aa51 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -699,6 +699,31 @@ static void page_list_add_scrub(struct page_info *pg, unsigned int node,
         page_list_add(pg, &heap(node, zone, order));
 }
 
+#define SCRUB_BYTE_PATTERN 0xc2c2c2c2c2c2c2c2
+#ifdef CONFIG_SCRUB_DEBUG
+static void poison_one_page(struct page_info *pg)
+{
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+
+    ptr = map_domain_page(mfn);
+    *ptr = ~SCRUB_BYTE_PATTERN;
+    unmap_domain_page(ptr);
+}
+
+static void check_one_page(struct page_info *pg)
+{
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+    unsigned i;
+
+    ptr = map_domain_page(mfn);
+    for ( i = 0; i < PAGE_SIZE / sizeof (*ptr); i++ )
+        ASSERT(ptr[i] == SCRUB_BYTE_PATTERN);
+    unmap_domain_page(ptr);
+}
+#endif /* CONFIG_SCRUB_DEBUG */
+
 static void check_and_stop_scrub(struct page_info *head)
 {
     if ( head->u.free.scrub_state & PAGE_SCRUBBING )
@@ -913,6 +938,11 @@ static struct page_info *alloc_heap_pages(
          * guest can control its own visibility of/through the cache.
          */
         flush_page_to_ram(page_to_mfn(&pg[i]));
+
+#ifdef CONFIG_SCRUB_DEBUG
+        if ( d && !is_idle_domain(d) )
+            check_one_page(&pg[i]);
+#endif
     }
 
     spin_unlock(&heap_lock);
@@ -1294,6 +1324,11 @@ static void free_heap_pages(
     {
         pg->count_info |= PGC_need_scrub;
         node_need_scrub[node] += (1UL << order);
+
+#ifdef CONFIG_SCRUB_DEBUG
+        for ( i = 0; i < (1 << order); i++ )
+            poison_one_page(&pg[i]);
+#endif
     }
 
     pg = merge_chunks(pg, node, zone, order, false);
@@ -1590,6 +1625,14 @@ static void init_heap_pages(
             nr_pages -= n;
         }
 
+#ifdef CONFIG_SCRUB_DEBUG
+        /*
+         * These pages get into the heap and can be allocated to dom0
+         * before the boot-time scrub happens.
+         * Not scrubbing them here would make check_one_page() fail.
+         */
+        scrub_one_page(pg + i);
+#endif
         free_heap_pages(pg + i, 0, 0);
     }
 }
@@ -2123,6 +2166,9 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
             {
                 BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
                 arch_free_heap_page(d, &pg[i]);
+#ifdef CONFIG_SCRUB_DEBUG
+                scrub_one_page(&pg[i]);
+#endif
             }
 
             drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
@@ -2226,7 +2272,8 @@ void scrub_one_page(struct page_info *pg)
 
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
-    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
+    unmap_domain_page(memset(__map_domain_page(pg),
+                             SCRUB_BYTE_PATTERN & 0xff, PAGE_SIZE));
 #else
     /* For a production build, clear_page() is the fastest way to scrub. */
     clear_domain_page(_mfn(page_to_mfn(pg)));
-- 
1.7.1

