From: Jaeyong Yoo <jaeyong.yoo@samsung.com>
To: xen-devel@lists.xen.org
Cc: Jaeyong Yoo <jaeyong.yoo@samsung.com>
Subject: [PATCH v3 06/10] xen/arm: Implement virtual-linear page table for guest p2m mapping in live migration
Date: Thu, 01 Aug 2013 21:57:49 +0900
Message-ID: <1375361873-32145-7-git-send-email-jaeyong.yoo@samsung.com>
In-Reply-To: <1375361873-32145-1-git-send-email-jaeyong.yoo@samsung.com>

Allocate and free Xen virtual address space for a virtual-linear page
table (VLPT) of the guest p2m. The guest p2m is slotted into the
hypervisor's own page tables so that the guest p2m table entries become
accessible at known virtual addresses. For more background, see:
http://www.technovelty.org/linux/virtual-linear-page-table.html

This facility is used for dirty-page tracing: when a domU write fault
is trapped by Xen, Xen can immediately locate the p2m entry
corresponding to the faulting address.
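
As an illustration (not part of this patch): once the p2m's third-level
tables are mapped linearly, the entry covering a guest physical address
can be reached by plain arithmetic on the guest frame number. The helper
name below is hypothetical; lpae_t, paddr_t and PAGE_SHIFT are the
existing Xen/ARM definitions.

    /*
     * Hypothetical sketch: "table" stands for the virtual address at
     * which the guest p2m has been slotted in (e.g. space obtained from
     * vlpt_alloc()), holding one lpae_t per guest page.
     */
    static inline lpae_t *vlpt_entry_for(lpae_t *table, paddr_t gpa)
    {
        /* Index the linear table by guest frame number. */
        return &table[gpa >> PAGE_SHIFT];
    }

With the faulting address in hand, updating the dirty bitmap then needs
only this single access rather than a software walk of the p2m.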

Signed-off-by: Jaeyong Yoo <jaeyong.yoo@samsung.com>
---
 xen/arch/arm/Makefile        |   1 +
 xen/arch/arm/setup.c         |   3 +
 xen/arch/arm/vlpt.c          | 162 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/config.h |   3 +
 xen/include/asm-arm/vlpt.h   |  10 +++
 5 files changed, 179 insertions(+)
 create mode 100644 xen/arch/arm/vlpt.c
 create mode 100644 xen/include/asm-arm/vlpt.h

diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
index fa15412..86165e7 100644
--- a/xen/arch/arm/Makefile
+++ b/xen/arch/arm/Makefile
@@ -31,6 +31,7 @@ obj-y += vpl011.o
 obj-y += hvm.o
 obj-y += device.o
 obj-y += save.o
+obj-y += vlpt.o
 
 #obj-bin-y += ....o
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 1ec5e38..27f0cca 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -35,6 +35,7 @@
 #include <xen/cpu.h>
 #include <xen/pfn.h>
 #include <xen/vmap.h>
+#include <asm/vlpt.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/setup.h>
@@ -447,6 +448,8 @@ void __init start_xen(unsigned long boot_phys_offset,
     dt_unflatten_host_device_tree();
     dt_irq_xlate = gic_irq_xlate;
 
+    vlpt_init();
+
     dt_uart_init();
     console_init_preirq();
 
diff --git a/xen/arch/arm/vlpt.c b/xen/arch/arm/vlpt.c
new file mode 100644
index 0000000..49b1887
--- /dev/null
+++ b/xen/arch/arm/vlpt.c
@@ -0,0 +1,162 @@
+#ifdef VIRT_LIN_P2M_START
+#include <xen/bitmap.h>
+#include <xen/cache.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/pfn.h>
+#include <xen/spinlock.h>
+#include <xen/types.h>
+#include <asm/vlpt.h>
+#include <asm/page.h>
+#include <asm/early_printk.h>
+
+static DEFINE_SPINLOCK(vlpt_lock);
+static void *__read_mostly vlpt_base;
+#define vlpt_bitmap ((unsigned long *)vlpt_base)
+/* highest allocated bit in the bitmap */
+static unsigned int __read_mostly vlpt_top;
+/* total number of bits in the bitmap */
+static unsigned int __read_mostly vlpt_end;
+/* lowest known clear bit in the bitmap */
+static unsigned int vlpt_low;
+
+void __init vlpt_init(void)
+{
+    unsigned int i, nr;
+    unsigned long va;
+
+    vlpt_base = (void *)VIRT_LIN_P2M_START;
+    vlpt_end = PFN_DOWN((void *)VIRT_LIN_P2M_END - vlpt_base);
+    vlpt_low = PFN_UP((vlpt_end + 7) / 8);
+    nr = PFN_UP((vlpt_low + 7) / 8);
+    vlpt_top = nr * PAGE_SIZE * 8;
+
+    for ( i = 0, va = (unsigned long)vlpt_bitmap; i < nr; ++i, va += PAGE_SIZE )
+    {
+        struct page_info *pg = alloc_domheap_page(NULL, 0);
+
+        map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
+        clear_page((void *)va);
+    }
+    bitmap_fill(vlpt_bitmap, vlpt_low);
+    /* Populate page tables for the bitmap if necessary. */
+    map_pages_to_xen(va, 0, vlpt_low - nr, MAP_SMALL_PAGES);
+}
+
+void *vlpt_alloc(unsigned int nr, unsigned int align)
+{
+    unsigned int start, bit;
+
+    if ( !align )
+        align = 1;
+    else if ( align & (align - 1) )
+        align &= -align;
+
+    spin_lock(&vlpt_lock);
+    for ( ; ; )
+    {
+        struct page_info *pg;
+
+        ASSERT(!test_bit(vlpt_low, vlpt_bitmap));
+        for ( start = vlpt_low; ; )
+        {
+            bit = find_next_bit(vlpt_bitmap, vlpt_top, start + 1);
+            if ( bit > vlpt_top )
+                bit = vlpt_top;
+            /*
+             * Note that this skips the first bit, making the
+             * corresponding page a guard one.
+             */
+            start = (start + align) & ~(align - 1);
+            if ( start + nr <= bit )
+                break;
+            start = bit < vlpt_top ?
+                    find_next_zero_bit(vlpt_bitmap, vlpt_top, bit + 1) : bit;
+            if ( start >= vlpt_top )
+                break;
+        }
+
+        if ( start < vlpt_top )
+            break;
+
+        spin_unlock(&vlpt_lock);
+
+        if ( vlpt_top >= vlpt_end )
+            return NULL;
+
+        pg = alloc_domheap_page(NULL, 0);
+        if ( !pg )
+            return NULL;
+
+        spin_lock(&vlpt_lock);
+
+        if ( start >= vlpt_top )
+        {
+            unsigned long va = (unsigned long)vlpt_bitmap + vlpt_top / 8;
+
+            if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
+            {
+                clear_page((void *)va);
+                vlpt_top += PAGE_SIZE * 8;
+                if ( vlpt_top > vlpt_end )
+                    vlpt_top = vlpt_end;
+                continue;
+            }
+        }
+
+        free_domheap_page(pg);
+
+        if ( start >= vlpt_top )
+        {
+            spin_unlock(&vlpt_lock);
+            return NULL;
+        }
+    }
+
+    for ( bit = start; bit < start + nr; ++bit )
+        __set_bit(bit, vlpt_bitmap);
+    if ( start <= vlpt_low + 2 )
+        vlpt_low = bit;
+    spin_unlock(&vlpt_lock);
+
+    return vlpt_base + start * PAGE_SIZE;
+}
+
+static unsigned int vlpt_index(const void *va)
+{
+    unsigned long addr = (unsigned long)va & ~(PAGE_SIZE - 1);
+    unsigned int idx;
+
+    if ( addr < VIRT_LIN_P2M_START + (vlpt_end / 8) ||
+         addr >= VIRT_LIN_P2M_START + vlpt_top * PAGE_SIZE )
+        return 0;
+
+    idx = PFN_DOWN(va - vlpt_base);
+    return !test_bit(idx - 1, vlpt_bitmap) &&
+           test_bit(idx, vlpt_bitmap) ? idx : 0;
+}
+
+void vlpt_free(const void *va)
+{
+    unsigned int bit = vlpt_index(va);
+
+    if ( !bit )
+    {
+        WARN_ON(va != NULL);
+        return;
+    }
+
+    spin_lock(&vlpt_lock);
+    if ( bit < vlpt_low )
+    {
+        vlpt_low = bit - 1;
+        while ( !test_bit(vlpt_low - 1, vlpt_bitmap) )
+            --vlpt_low;
+    }
+    while ( __test_and_clear_bit(bit, vlpt_bitmap) )
+        if ( ++bit == vlpt_top )
+            break;
+    spin_unlock(&vlpt_lock);
+}
+
+#endif
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index e3cfaf1..f9a7063 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -80,6 +80,7 @@
  *  6M  -  8M   Early boot misc (see below)
  *
  * 32M - 128M   Frametable: 24 bytes per page for 16GB of RAM
+ * 128M - 256M   Virtual-linear mapping to P2M table
  * 256M -  1G   VMAP: ioremap and early_ioremap use this virtual address
  *                    space
  *
@@ -95,12 +96,14 @@
 #define FIXMAP_ADDR(n)        (mk_unsigned_long(0x00400000) + (n) * PAGE_SIZE)
 #define BOOT_MISC_VIRT_START   mk_unsigned_long(0x00600000)
 #define FRAMETABLE_VIRT_START  mk_unsigned_long(0x02000000)
+#define VIRT_LIN_P2M_START     mk_unsigned_long(0x08000000)
 #define VMAP_VIRT_START        mk_unsigned_long(0x10000000)
 #define XENHEAP_VIRT_START     mk_unsigned_long(0x40000000)
 #define DOMHEAP_VIRT_START     mk_unsigned_long(0x80000000)
 #define DOMHEAP_VIRT_END       mk_unsigned_long(0xffffffff)
 
 #define VMAP_VIRT_END          XENHEAP_VIRT_START
+#define VIRT_LIN_P2M_END       VMAP_VIRT_START
 #define HYPERVISOR_VIRT_START  XEN_VIRT_START
 
 #define DOMHEAP_ENTRIES        1024  /* 1024 2MB mapping slots */
diff --git a/xen/include/asm-arm/vlpt.h b/xen/include/asm-arm/vlpt.h
new file mode 100644
index 0000000..da55293
--- /dev/null
+++ b/xen/include/asm-arm/vlpt.h
@@ -0,0 +1,10 @@
+#if !defined(__XEN_VLPT_H__) && defined(VIRT_LIN_P2M_START)
+#define __XEN_VLPT_H__
+
+#include <xen/types.h>
+
+void *vlpt_alloc(unsigned int nr, unsigned int align);
+void vlpt_free(const void *);
+void vlpt_init(void);
+
+#endif /* __XEN_VLPT_H__ */
-- 
1.8.1.2
