xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
	George.Dunlap@eu.citrix.com, andrew.cooper3@citrix.com,
	ian.jackson@eu.citrix.com, tim@xen.org, jbeulich@suse.com,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>
Subject: [PATCH v5 8/8] mm: Make sure pages are scrubbed
Date: Thu, 22 Jun 2017 14:57:10 -0400	[thread overview]
Message-ID: <1498157830-21845-9-git-send-email-boris.ostrovsky@oracle.com> (raw)
In-Reply-To: <1498157830-21845-1-git-send-email-boris.ostrovsky@oracle.com>

Add a debug Kconfig option that will make page allocator verify
that pages that were supposed to be scrubbed are, in fact, clean.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v5:
* Defined SCRUB_PATTERN for NDEBUG
* Style changes


 xen/Kconfig.debug       |  7 ++++++
 xen/common/page_alloc.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 689f297..195d504 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -114,6 +114,13 @@ config DEVICE_TREE_DEBUG
 	  logged in the Xen ring buffer.
 	  If unsure, say N here.
 
+config SCRUB_DEBUG
+	bool "Page scrubbing test"
+	default DEBUG
+	---help---
+	  Verify that pages that need to be scrubbed before being allocated to
+	  a guest are indeed scrubbed.
+
 endif # DEBUG || EXPERT
 
 endmenu
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index da5ffc2..5d50c2a 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -170,6 +170,10 @@ boolean_param("bootscrub", opt_bootscrub);
 static unsigned long __initdata opt_bootscrub_chunk = MB(128);
 size_param("bootscrub_chunk", opt_bootscrub_chunk);
 
+#ifdef CONFIG_SCRUB_DEBUG
+static bool __read_mostly boot_scrub_done;
+#endif
+
 /*
  * Bit width of the DMA heap -- used to override NUMA-node-first.
  * allocation strategy, which can otherwise exhaust low memory.
@@ -695,6 +699,43 @@ static void page_list_add_scrub(struct page_info *pg, unsigned int node,
         page_list_add(pg, &heap(node, zone, order));
 }
 
+/* SCRUB_PATTERN needs to be a repeating series of bytes. */
+#ifndef NDEBUG
+#define SCRUB_PATTERN        0xc2c2c2c2c2c2c2c2ULL
+#else
+#define SCRUB_PATTERN        0ULL
+#endif
+#define SCRUB_BYTE_PATTERN   (SCRUB_PATTERN & 0xff)
+
+static void poison_one_page(struct page_info *pg)
+{
+#ifdef CONFIG_SCRUB_DEBUG
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+
+    ptr = map_domain_page(mfn);
+    *ptr = ~SCRUB_PATTERN;
+    unmap_domain_page(ptr);
+#endif
+}
+
+static void check_one_page(struct page_info *pg)
+{
+#ifdef CONFIG_SCRUB_DEBUG
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    const uint64_t *ptr;
+    unsigned int i;
+
+    if ( !boot_scrub_done )
+        return;
+
+    ptr = map_domain_page(mfn);
+    for ( i = 0; i < PAGE_SIZE / sizeof (*ptr); i++ )
+        ASSERT(ptr[i] == SCRUB_PATTERN);
+    unmap_domain_page(ptr);
+#endif
+}
+
 static void check_and_stop_scrub(struct page_info *head)
 {
     if ( head->u.free.scrub_state == BUDDY_SCRUBBING )
@@ -931,6 +972,9 @@ static struct page_info *alloc_heap_pages(
          * guest can control its own visibility of/through the cache.
          */
         flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
+
+        if ( !(memflags & MEMF_no_scrub) )
+            check_one_page(&pg[i]);
     }
 
     spin_unlock(&heap_lock);
@@ -1294,7 +1338,10 @@ static void free_heap_pages(
         set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
 
         if ( need_scrub )
+        {
             pg[i].count_info |= PGC_need_scrub;
+            poison_one_page(&pg[i]);
+        }
     }
 
     avail[node][zone] += 1 << order;
@@ -1656,7 +1703,12 @@ static void init_heap_pages(
             nr_pages -= n;
         }
 
+#ifndef CONFIG_SCRUB_DEBUG
         free_heap_pages(pg + i, 0, false);
+#else
+        free_heap_pages(pg + i, 0, boot_scrub_done);
+#endif
+	
     }
 }
 
@@ -1922,6 +1974,10 @@ void __init scrub_heap_pages(void)
 
     printk("done.\n");
 
+#ifdef CONFIG_SCRUB_DEBUG
+    boot_scrub_done = true;
+#endif
+
     /* Now that the heap is initialized, run checks and set bounds
      * for the low mem virq algorithm. */
     setup_low_mem_virq();
@@ -2195,12 +2251,16 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
 
             spin_unlock_recursive(&d->page_alloc_lock);
 
+#ifndef CONFIG_SCRUB_DEBUG
             /*
              * Normally we expect a domain to clear pages before freeing them,
              * if it cares about the secrecy of their contents. However, after
              * a domain has died we assume responsibility for erasure.
              */
             scrub = !!d->is_dying;
+#else
+            scrub = true;
+#endif
         }
         else
         {
@@ -2292,7 +2352,8 @@ void scrub_one_page(struct page_info *pg)
 
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
-    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
+    unmap_domain_page(memset(__map_domain_page(pg),
+                             SCRUB_BYTE_PATTERN, PAGE_SIZE));
 #else
     /* For a production build, clear_page() is the fastest way to scrub. */
     clear_domain_page(_mfn(page_to_mfn(pg)));
-- 
1.8.3.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

  parent reply	other threads:[~2017-06-22 18:57 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-06-22 18:57 [PATCH v5 0/8] Memory scrubbing from idle loop Boris Ostrovsky
2017-06-22 18:57 ` [PATCH v5 1/8] mm: Place unscrubbed pages at the end of pagelist Boris Ostrovsky
2017-06-27 17:06   ` Jan Beulich
2017-07-23  2:00     ` Boris Ostrovsky
2017-07-31 14:45       ` Jan Beulich
2017-07-31 16:03         ` Boris Ostrovsky
2017-08-02  9:24           ` Jan Beulich
2017-08-02 15:31             ` Boris Ostrovsky
2017-06-22 18:57 ` [PATCH v5 2/8] mm: Extract allocation loop from alloc_heap_pages() Boris Ostrovsky
2017-06-27 17:59   ` Jan Beulich
2017-06-22 18:57 ` [PATCH v5 3/8] mm: Scrub pages in alloc_heap_pages() if needed Boris Ostrovsky
2017-06-27 18:00   ` Jan Beulich
2017-07-23  2:07     ` Boris Ostrovsky
2017-07-31 15:16       ` Jan Beulich
2017-07-31 16:07         ` Boris Ostrovsky
2017-06-22 18:57 ` [PATCH v5 4/8] mm: Scrub memory from idle loop Boris Ostrovsky
2017-06-23  8:36   ` Dario Faggioli
2017-06-27 18:01   ` Jan Beulich
2017-07-23  2:14     ` Boris Ostrovsky
2017-07-31 15:20       ` Jan Beulich
2017-07-31 16:15         ` Boris Ostrovsky
2017-08-02  9:27           ` Jan Beulich
2017-06-22 18:57 ` [PATCH v5 5/8] spinlock: Introduce spin_lock_cb() Boris Ostrovsky
2017-06-22 18:57 ` [PATCH v5 6/8] mm: Keep heap accessible to others while scrubbing Boris Ostrovsky
2017-06-27 19:28   ` Jan Beulich
2017-06-27 19:31     ` Jan Beulich
2017-07-23  2:28     ` Boris Ostrovsky
2017-08-02  8:34       ` Jan Beulich
2017-06-22 18:57 ` [PATCH v5 7/8] mm: Print number of unscrubbed pages in 'H' debug handler Boris Ostrovsky
2017-06-22 18:57 ` Boris Ostrovsky [this message]
2017-06-27 19:29   ` [PATCH v5 8/8] mm: Make sure pages are scrubbed Jan Beulich
2017-06-23  9:36 ` [PATCH v5 0/8] Memory scrubbing from idle loop Jan Beulich
2017-06-23 13:11   ` Boris Ostrovsky
2017-06-23 13:22     ` Jan Beulich
2017-06-23 13:29       ` Boris Ostrovsky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1498157830-21845-9-git-send-email-boris.ostrovsky@oracle.com \
    --to=boris.ostrovsky@oracle.com \
    --cc=George.Dunlap@eu.citrix.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=sstabellini@kernel.org \
    --cc=tim@xen.org \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).