From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
George.Dunlap@eu.citrix.com, andrew.cooper3@citrix.com,
ian.jackson@eu.citrix.com, tim@xen.org, jbeulich@suse.com,
Boris Ostrovsky <boris.ostrovsky@oracle.com>
Subject: [PATCH v6 8/8] mm: Make sure pages are scrubbed
Date: Fri, 4 Aug 2017 13:05:46 -0400
Message-ID: <1501866346-9774-9-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1501866346-9774-1-git-send-email-boris.ostrovsky@oracle.com>
Add a debug Kconfig option that will make the page allocator verify
that pages which were supposed to be scrubbed are, in fact, clean.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
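(Not part of the commit message: below is a minimal standalone sketch of
the poison-then-verify idea this patch implements, for readers who want to
try it outside the hypervisor. It is plain userspace C; PAGE_SIZE, the
static buffer, and the helper bodies are illustrative stand-ins for the
map_domain_page()-based versions in the diff, not Xen code.)

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE      4096
#define SCRUB_PATTERN  0xc2c2c2c2c2c2c2c2ULL

static uint64_t page[PAGE_SIZE / sizeof(uint64_t)];

/* Freeing a dirty page: stamp it so a missed scrub becomes detectable. */
static void poison_one_page(void)
{
    page[0] = ~SCRUB_PATTERN;
}

/* What the scrubber is expected to do: overwrite the whole page. */
static void scrub_one_page(void)
{
    memset(page, SCRUB_PATTERN & 0xff, PAGE_SIZE);
}

/* Allocation time: every word must carry the scrub pattern. */
static void check_one_page(void)
{
    unsigned int i;

    for ( i = 0; i < PAGE_SIZE / sizeof(page[0]); i++ )
        assert(page[i] == SCRUB_PATTERN);
}

int main(void)
{
    poison_one_page();
    scrub_one_page();   /* comment this out and the assert fires */
    check_one_page();
    return 0;
}
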
xen/Kconfig.debug | 7 ++++++
xen/common/page_alloc.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 689f297..195d504 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -114,6 +114,13 @@ config DEVICE_TREE_DEBUG
logged in the Xen ring buffer.
If unsure, say N here.
+config SCRUB_DEBUG
+ bool "Page scrubbing test"
+ default DEBUG
+ ---help---
+ Verify that pages that need to be scrubbed before being allocated to
+ a guest are indeed scrubbed.
+
endif # DEBUG || EXPERT
endmenu
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7cd736c..aac1ff2 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -170,6 +170,10 @@ boolean_param("bootscrub", opt_bootscrub);
static unsigned long __initdata opt_bootscrub_chunk = MB(128);
size_param("bootscrub_chunk", opt_bootscrub_chunk);
+#ifdef CONFIG_SCRUB_DEBUG
+static bool __read_mostly boot_scrub_done;
+#endif
+
/*
* Bit width of the DMA heap -- used to override NUMA-node-first
* allocation strategy, which can otherwise exhaust low memory.
@@ -698,6 +702,43 @@ static void page_list_add_scrub(struct page_info *pg, unsigned int node,
page_list_add(pg, &heap(node, zone, order));
}
+/* SCRUB_PATTERN needs to be a repeating series of bytes. */
+#ifndef NDEBUG
+#define SCRUB_PATTERN 0xc2c2c2c2c2c2c2c2ULL
+#else
+#define SCRUB_PATTERN 0ULL
+#endif
+#define SCRUB_BYTE_PATTERN (SCRUB_PATTERN & 0xff)
+
+static void poison_one_page(struct page_info *pg)
+{
+#ifdef CONFIG_SCRUB_DEBUG
+ mfn_t mfn = _mfn(page_to_mfn(pg));
+ uint64_t *ptr;
+
+ ptr = map_domain_page(mfn);
+ *ptr = ~SCRUB_PATTERN;
+ unmap_domain_page(ptr);
+#endif
+}
+
+static void check_one_page(struct page_info *pg)
+{
+#ifdef CONFIG_SCRUB_DEBUG
+ mfn_t mfn = _mfn(page_to_mfn(pg));
+ const uint64_t *ptr;
+ unsigned int i;
+
+ if ( !boot_scrub_done )
+ return;
+
+ ptr = map_domain_page(mfn);
+ for ( i = 0; i < PAGE_SIZE / sizeof (*ptr); i++ )
+ ASSERT(ptr[i] == SCRUB_PATTERN);
+ unmap_domain_page(ptr);
+#endif
+}
+
static void check_and_stop_scrub(struct page_info *head)
{
if ( head->u.free.scrub_state == BUDDY_SCRUBBING )
@@ -932,6 +973,9 @@ static struct page_info *alloc_heap_pages(
* guest can control its own visibility of/through the cache.
*/
flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
+
+ if ( !(memflags & MEMF_no_scrub) )
+ check_one_page(&pg[i]);
}
spin_unlock(&heap_lock);
@@ -1306,7 +1350,10 @@ static void free_heap_pages(
set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
if ( need_scrub )
+ {
pg[i].count_info |= PGC_need_scrub;
+ poison_one_page(&pg[i]);
+ }
}
avail[node][zone] += 1 << order;
@@ -1664,7 +1711,12 @@ static void init_heap_pages(
nr_pages -= n;
}
+#ifndef CONFIG_SCRUB_DEBUG
free_heap_pages(pg + i, 0, false);
+#else
+ free_heap_pages(pg + i, 0, boot_scrub_done);
+#endif
+
}
}
@@ -1930,6 +1982,10 @@ void __init scrub_heap_pages(void)
printk("done.\n");
+#ifdef CONFIG_SCRUB_DEBUG
+ boot_scrub_done = true;
+#endif
+
/* Now that the heap is initialized, run checks and set bounds
* for the low mem virq algorithm. */
setup_low_mem_virq();
@@ -2203,12 +2259,16 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
spin_unlock_recursive(&d->page_alloc_lock);
+#ifndef CONFIG_SCRUB_DEBUG
/*
* Normally we expect a domain to clear pages before freeing them,
* if it cares about the secrecy of their contents. However, after
* a domain has died we assume responsibility for erasure.
*/
scrub = !!d->is_dying;
+#else
+ scrub = true;
+#endif
}
else
{
@@ -2300,7 +2360,8 @@ void scrub_one_page(struct page_info *pg)
#ifndef NDEBUG
/* Avoid callers relying on allocations returning zeroed pages. */
- unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
+ unmap_domain_page(memset(__map_domain_page(pg),
+ SCRUB_BYTE_PATTERN, PAGE_SIZE));
#else
/* For a production build, clear_page() is the fastest way to scrub. */
clear_domain_page(_mfn(page_to_mfn(pg)));
#endif
--
1.8.3.1