xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 0/6] xen: arm: various improvements to boot time page table handling
@ 2014-07-21 12:58 Ian Campbell
  2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
  0 siblings, 1 reply; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 12:58 UTC (permalink / raw)
  To: xen-devel; +Cc: Tim Deegan, Julien Grall, Stefano Stabellini

Just a few cleanups suggested by Julien and Andrew for v3 this time.

This lets us load at a 4K aligned address, which is what we get from the
Juno firmware.

Ian.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S
  2014-07-21 12:58 [PATCH v3 0/6] xen: arm: various improvements to boot time page table handling Ian Campbell
@ 2014-07-21 12:59 ` Ian Campbell
  2014-07-21 12:59   ` [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
                     ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 12:59 UTC (permalink / raw)
  To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
---
 xen/arch/arm/arm32/head.S |   11 +++++------
 xen/arch/arm/arm64/head.S |   15 +++++++--------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 73b97cb..51501dc 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -266,10 +266,10 @@ cpu_init_done:
         strd  r2, r3, [r4, #0]       /* Map it in slot 0 */
 
         /* ... map of paddr(start) in boot_pgtable */
-        lsrs  r1, r9, #30            /* Offset of base paddr in boot_pgtable */
+        lsrs  r1, r9, #FIRST_SHIFT   /* Offset of base paddr in boot_pgtable */
         beq   1f                     /* If it is in slot 0 then map in boot_second
                                       * later on */
-        lsl   r2, r1, #30            /* Base address for 1GB mapping */
+        lsl   r2, r1, #FIRST_SHIFT   /* Base address for 1GB mapping */
         orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
         orr   r2, r2, #PT_LOWER(MEM)
         lsl   r1, r1, #3             /* r1 := Slot offset */
@@ -277,7 +277,7 @@ cpu_init_done:
 
 1:      /* Setup boot_second: */
         ldr   r4, =boot_second
-        add   r4, r4, r10            /* r1 := paddr (boot_second) */
+        add   r4, r4, r10            /* r4 := paddr (boot_second) */
 
         lsr   r2, r9, #SECOND_SHIFT  /* Base address for 2MB mapping */
         lsl   r2, r2, #SECOND_SHIFT
@@ -320,7 +320,7 @@ paging:
         dsb
 #if defined(CONFIG_EARLY_PRINTK) /* Fixmap is only used by early printk */
         /* Non-boot CPUs don't need to rebuild the fixmap itself, just
-	 * the mapping from boot_second to xen_fixmap */
+         * the mapping from boot_second to xen_fixmap */
         teq   r12, #0
         bne   1f
 
@@ -408,8 +408,7 @@ launch:
         beq   start_xen              /* and disappear into the land of C */
         b     start_secondary        /* (to the appropriate entry point) */
 
-/* Fail-stop
- * r0: string explaining why */
+/* Fail-stop */
 fail:   PRINT("- Boot failed -\r\n")
 1:      wfe
         b     1b
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 7d53143..d46481b 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -267,11 +267,11 @@ skip_bss:
         str   x2, [x4, #0]           /* Map it in slot 0 */
 
         /* ... map of paddr(start) in boot_pgtable */
-        lsr   x1, x19, #39           /* Offset of base paddr in boot_pgtable */
+        lsr   x1, x19, #ZEROETH_SHIFT/* Offset of base paddr in boot_pgtable */
         cbz   x1, 1f                 /* It's in slot 0, map in boot_first
                                       * or boot_second later on */
 
-        lsl   x2, x1, #39            /* Base address for 512GB mapping */
+        lsl   x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
         mov   x3, #PT_MEM            /* x2 := Section mapping */
         orr   x2, x2, x3
         lsl   x1, x1, #3             /* x1 := Slot offset */
@@ -284,7 +284,7 @@ skip_bss:
         /* ... map boot_second in boot_first[0] */
         ldr   x1, =boot_second
         add   x1, x1, x20            /* x1 := paddr(boot_second) */
-        mov   x3, #PT_PT             /* x2 := table map of boot_first */
+        mov   x3, #PT_PT             /* x2 := table map of boot_second */
         orr   x2, x1, x3             /*       + rights for linear PT */
         str   x2, [x4, #0]           /* Map it in slot 0 */
 
@@ -300,8 +300,8 @@ skip_bss:
         str   x2, [x4, x1]           /* Create mapping of paddr(start)*/
 
 1:      /* Setup boot_second: */
-        ldr   x4, =boot_second
-        add   x4, x4, x20            /* x4 := paddr (boot_second) */
+        ldr   x4, =boot_second       /* Next level into boot_second */
+        add   x4, x4, x20            /* x4 := paddr(boot_second) */
 
         lsr   x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
         lsl   x2, x2, #SECOND_SHIFT
@@ -345,7 +345,7 @@ paging:
         dsb   sy
 #if defined(CONFIG_EARLY_PRINTK) /* Fixmap is only used by early printk */
         /* Non-boot CPUs don't need to rebuild the fixmap itself, just
-	 * the mapping from boot_second to xen_fixmap */
+         * the mapping from boot_second to xen_fixmap */
         cbnz  x22, 1f
 
         /* Add UART to the fixmap table */
@@ -428,8 +428,7 @@ launch:
         cbz   x22, start_xen         /* and disappear into the land of C */
         b     start_secondary        /* (to the appropriate entry point) */
 
-/* Fail-stop
- * r0: string explaining why */
+/* Fail-stop */
 fail:   PRINT("- Boot failed -\r\n")
 1:      wfe
         b     1b
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address.
  2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
@ 2014-07-21 12:59   ` Ian Campbell
  2014-07-21 13:20     ` Tim Deegan
  2014-07-21 14:16     ` Julien Grall
  2014-07-21 12:59   ` [PATCH v3 3/6] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
                     ` (3 subsequent siblings)
  4 siblings, 2 replies; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 12:59 UTC (permalink / raw)
  To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini

Currently the boot page tables map Xen at XEN_VIRT_START using a 2MB section
mapping. This means that the bootloader must load Xen at a 2MB aligned address.
Unfortunately this is not the case with UEFI on the Juno platform where Xen
fails to boot. Furthermore the Linux boot protocol (which Xen claims to adhere
to) does not have this restriction, therefore this is our bug and not the
bootloader's.

Fix this by adding third level pagetables to the boot time pagetables, allowing
us to map a Xen which is aligned only to a 4K boundary. This only affects the
boot time page tables since Xen will later relocate itself to a 2MB aligned
address. Strictly speaking the non-boot processors could make use of this and
use a section mapping, but it is simpler if all processors follow the same boot
path.

Strictly speaking the Linux boot protocol doesn't even require 4K alignment
(and apparently Linux can cope with this), but so far all bootloaders appear to
provide it, so support for this is left for another day.

In order to use LPAE_ENTRIES in head.S we need to define it in an asm friendly
way.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v3: Use LPAE_ENTRY_MASK.
    Use "ldr rX, =XXX" to avoid opencoding the construction of the
    const.
    Update comment at start to reflect this change
v2: Use LPAE_ENTRIES and PAGE_SIZE
    Minor updates to the asm

update comment
---
 xen/arch/arm/arm32/head.S  |   62 ++++++++++++++++++++++++++++++++------------
 xen/arch/arm/arm64/head.S  |   62 +++++++++++++++++++++++++++++++-------------
 xen/arch/arm/mm.c          |    8 ++++--
 xen/include/asm-arm/page.h |    2 +-
 4 files changed, 97 insertions(+), 37 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 51501dc..9bc893f 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -26,6 +26,7 @@
 
 #define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
 #define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
 #define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
 #define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
 
@@ -73,7 +74,7 @@
 
         /* This must be the very first address in the loaded image.
          * It should be linked at XEN_VIRT_START, and loaded at any
-         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
+         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
          * or the initial pagetable code below will need adjustment. */
         .global start
 start:
@@ -258,11 +259,11 @@ cpu_init_done:
         /* Setup boot_pgtable: */
         ldr   r1, =boot_second
         add   r1, r1, r10            /* r1 := paddr (boot_second) */
-        mov   r3, #0x0
 
         /* ... map boot_second in boot_pgtable[0] */
         orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_second */
         orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
+        mov   r3, #0x0
         strd  r2, r3, [r4, #0]       /* Map it in slot 0 */
 
         /* ... map of paddr(start) in boot_pgtable */
@@ -279,31 +280,60 @@ cpu_init_done:
         ldr   r4, =boot_second
         add   r4, r4, r10            /* r4 := paddr (boot_second) */
 
-        lsr   r2, r9, #SECOND_SHIFT  /* Base address for 2MB mapping */
-        lsl   r2, r2, #SECOND_SHIFT
+        ldr   r1, =boot_third
+        add   r1, r1, r10            /* r1 := paddr (boot_third) */
+
+        /* ... map boot_third in boot_second[1] */
+        orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_third */
+        orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
+        mov   r3, #0x0
+        strd  r2, r3, [r4, #8]       /* Map it in slot 1 */
+
+        /* ... map of paddr(start) in boot_second */
+        lsr   r2, r9, #SECOND_SHIFT  /* Offset of base paddr in boot_second */
+        ldr   r3, =LPAE_ENTRY_MASK
+        and   r1, r2, r3
+        cmp   r1, #1
+        beq   virtphys_clash         /* It's in slot 1, which we cannot handle */
+
+        lsl   r2, r2, #SECOND_SHIFT  /* Base address for 2MB mapping */
         orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
         orr   r2, r2, #PT_LOWER(MEM)
+        mov   r3, #0x0
+        lsl   r1, r1, #3             /* r1 := Slot offset */
+        strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
 
-        /* ... map of vaddr(start) in boot_second */
-        ldr   r1, =start
-        lsr   r1, #(SECOND_SHIFT - 3)   /* Slot for vaddr(start) */
-        strd  r2, r3, [r4, r1]       /* Map vaddr(start) */
+        /* Setup boot_third: */
+1:      ldr   r4, =boot_third
+        add   r4, r4, r10            /* r4 := paddr (boot_third) */
 
-        /* ... map of paddr(start) in boot_second */
-        lsrs  r1, r9, #30            /* Base paddr */
-        bne   1f                     /* If paddr(start) is not in slot 0
-                                      * then the mapping was done in
-                                      * boot_pgtable above */
+        lsr   r2, r9, #THIRD_SHIFT  /* Base address for 4K mapping */
+        lsl   r2, r2, #THIRD_SHIFT
+        orr   r2, r2, #PT_UPPER(MEM_L3) /* r2:r3 := map */
+        orr   r2, r2, #PT_LOWER(MEM_L3)
+        mov   r3, #0x0
 
-        mov   r1, r9, lsr #(SECOND_SHIFT - 3)   /* Slot for paddr(start) */
-        strd  r2, r3, [r4, r1]       /* Map Xen there */
-1:
+        /* ... map of vaddr(start) in boot_third */
+        mov   r1, #0
+1:      strd  r2, r3, [r4, r1]       /* Map vaddr(start) */
+        add   r2, r2, #PAGE_SIZE     /* Next page */
+        add   r1, r1, #8             /* Next slot */
+        cmp   r1, #(LPAE_ENTRIES<<3) /* 512*8-byte entries per page */
+        blo   1b
 
         /* Defer fixmap and dtb mapping until after paging enabled, to
          * avoid them clashing with the 1:1 mapping. */
 
         /* boot pagetable setup complete */
 
+        b     1f
+
+virtphys_clash:
+        /* Identity map clashes with boot_third, which we cannot handle yet */
+        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
+        b     fail
+
+1:
         PRINT("- Turning on paging -\r\n")
 
         ldr   r1, =paging            /* Explicit vaddr, not RIP-relative */
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index d46481b..7c04e5b 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -27,6 +27,7 @@
 
 #define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
 #define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
 #define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
 #define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
 
@@ -95,7 +96,7 @@
          *
          * This must be the very first address in the loaded image.
          * It should be linked at XEN_VIRT_START, and loaded at any
-         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
+         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
          * or the initial pagetable code below will need adjustment.
          */
 
@@ -274,8 +275,9 @@ skip_bss:
         lsl   x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
         mov   x3, #PT_MEM            /* x2 := Section mapping */
         orr   x2, x2, x3
-        lsl   x1, x1, #3             /* x1 := Slot offset */
-        str   x2, [x4, x1]           /* Mapping of paddr(start)*/
+        and   x1, x1, #LPAE_ENTRY_MASK /* x1 := Slot offset */
+        lsl   x1, x1, #3
+        str   x2, [x4, x1]           /* Mapping of paddr(start) */
 
 1:      /* Setup boot_first: */
         ldr   x4, =boot_first        /* Next level into boot_first */
@@ -290,7 +292,7 @@ skip_bss:
 
         /* ... map of paddr(start) in boot_first */
         lsr   x2, x19, #FIRST_SHIFT  /* x2 := Offset of base paddr in boot_first */
-        and   x1, x2, 0x1ff          /* x1 := Slot to use */
+        and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
         cbz   x1, 1f                 /* It's in slot 0, map in boot_second */
 
         lsl   x2, x2, #FIRST_SHIFT   /* Base address for 1GB mapping */
@@ -303,31 +305,55 @@ skip_bss:
         ldr   x4, =boot_second       /* Next level into boot_second */
         add   x4, x4, x20            /* x4 := paddr(boot_second) */
 
-        lsr   x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
-        lsl   x2, x2, #SECOND_SHIFT
+        /* ... map boot_third in boot_second[1] */
+        ldr   x1, =boot_third
+        add   x1, x1, x20            /* x1 := paddr(boot_third) */
+        mov   x3, #PT_PT             /* x2 := table map of boot_third */
+        orr   x2, x1, x3             /*       + rights for linear PT */
+        str   x2, [x4, #8]           /* Map it in slot 1 */
+
+        /* ... map of paddr(start) in boot_second */
+        lsr   x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
+        and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
+        cmp   x1, #1
+        b.eq  virtphys_clash         /* It's in slot 1, which we cannot handle */
+
+        lsl   x2, x2, #SECOND_SHIFT  /* Base address for 2MB mapping */
         mov   x3, #PT_MEM            /* x2 := Section map */
         orr   x2, x2, x3
+        lsl   x1, x1, #3             /* x1 := Slot offset */
+        str   x2, [x4, x1]           /* Create mapping of paddr(start)*/
 
-        /* ... map of vaddr(start) in boot_second */
-        ldr   x1, =start
-        lsr   x1, x1, #(SECOND_SHIFT - 3)   /* Slot for vaddr(start) */
-        str   x2, [x4, x1]           /* Map vaddr(start) */
+1:      /* Setup boot_third: */
+        ldr   x4, =boot_third
+        add   x4, x4, x20            /* x4 := paddr (boot_third) */
 
-        /* ... map of paddr(start) in boot_second */
-        lsr   x1, x19, #FIRST_SHIFT  /* Base paddr */
-        cbnz  x1, 1f                 /* If paddr(start) is not in slot 0
-                                      * then the mapping was done in
-                                      * boot_pgtable or boot_first above */
+        lsr   x2, x19, #THIRD_SHIFT  /* Base address for 4K mapping */
+        lsl   x2, x2, #THIRD_SHIFT
+        mov   x3, #PT_MEM_L3         /* x2 := Section map */
+        orr   x2, x2, x3
 
-        lsr   x1, x19, #(SECOND_SHIFT - 3)  /* Slot for paddr(start) */
-        str   x2, [x4, x1]           /* Map Xen there */
-1:
+        /* ... map of vaddr(start) in boot_third */
+        mov   x1, xzr
+1:      str   x2, [x4, x1]           /* Map vaddr(start) */
+        add   x2, x2, #PAGE_SIZE     /* Next page */
+        add   x1, x1, #8             /* Next slot */
+        cmp   x1, #(LPAE_ENTRIES<<3) /* 512 entries per page */
+        b.lt  1b
 
         /* Defer fixmap and dtb mapping until after paging enabled, to
          * avoid them clashing with the 1:1 mapping. */
 
         /* boot pagetable setup complete */
 
+        b     1f
+
+virtphys_clash:
+        /* Identity map clashes with boot_third, which we cannot handle yet */
+        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
+        b     fail
+
+1:
         PRINT("- Turning on paging -\r\n")
 
         ldr   x1, =paging            /* Explicit vaddr, not RIP-relative */
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 03a0533..fdc7c98 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -47,8 +47,9 @@ struct domain *dom_xen, *dom_io, *dom_cow;
  * to the CPUs own pagetables.
  *
  * These pagetables have a very simple structure. They include:
- *  - a 2MB mapping of xen at XEN_VIRT_START, boot_first and
- *    boot_second are used to populate the trie down to that mapping.
+ *  - 2MB worth of 4K mappings of xen at XEN_VIRT_START, boot_first and
+ *    boot_second are used to populate the tables down to boot_third
+ *    which contains the actual mapping.
  *  - a 1:1 mapping of xen at its current physical address. This uses a
  *    section mapping at whichever of boot_{pgtable,first,second}
  *    covers that physical address.
@@ -69,6 +70,7 @@ lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
 lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
 #endif
 lpae_t boot_second[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
+lpae_t boot_third[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
 
 /* Main runtime page tables */
 
@@ -492,6 +494,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 #endif
     memset(boot_second, 0x0, PAGE_SIZE);
     clean_and_invalidate_xen_dcache(boot_second);
+    memset(boot_third, 0x0, PAGE_SIZE);
+    clean_and_invalidate_xen_dcache(boot_third);
 
     /* Break up the Xen mapping into 4k pages and protect them separately. */
     for ( i = 0; i < LPAE_ENTRIES; i++ )
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 113be5a..739038a 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -396,7 +396,7 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
  */
 
 #define LPAE_SHIFT      9
-#define LPAE_ENTRIES    (1u << LPAE_SHIFT)
+#define LPAE_ENTRIES    (_AC(1,U) << LPAE_SHIFT)
 #define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1)
 
 #define THIRD_SHIFT    (PAGE_SHIFT)
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v3 3/6] xen: arm: Do not use level 0 section mappings in boot page tables.
  2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
  2014-07-21 12:59   ` [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
@ 2014-07-21 12:59   ` Ian Campbell
  2014-07-21 12:59   ` [PATCH v3 4/6] xen: arm: avoid unnecessary additional " Ian Campbell
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 12:59 UTC (permalink / raw)
  To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini

Level 0 does not support superpage mappings, meaning that on systems where Xen
is loaded above 512GB (I'm not aware of any such systems) the 1:1 mapping in
the boot page tables is invalid.

In order to avoid this issue we need an additional first level page table
mapped by the appropriate L0 slot and containing a 1:1 superpage mapping.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
---
v2: Fixed stray hard tab
---
 xen/arch/arm/arm64/head.S |   21 ++++++++++++++++++---
 xen/arch/arm/mm.c         |    3 +++
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 7c04e5b..2412fe8 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -267,13 +267,28 @@ skip_bss:
         orr   x2, x1, x3             /*       + rights for linear PT */
         str   x2, [x4, #0]           /* Map it in slot 0 */
 
-        /* ... map of paddr(start) in boot_pgtable */
+        /* ... map of paddr(start) in boot_pgtable+boot_first_id */
         lsr   x1, x19, #ZEROETH_SHIFT/* Offset of base paddr in boot_pgtable */
         cbz   x1, 1f                 /* It's in slot 0, map in boot_first
                                       * or boot_second later on */
 
-        lsl   x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
-        mov   x3, #PT_MEM            /* x2 := Section mapping */
+        /* Level zero does not support superpage mappings, so we have
+         * to use an extra first level page in which we create a 1GB mapping.
+         */
+        ldr   x2, =boot_first_id
+        add   x2, x2, x20            /* x2 := paddr (boot_first_id) */
+
+        mov   x3, #PT_PT             /* x2 := table map of boot_first_id */
+        orr   x2, x2, x3             /*       + rights for linear PT */
+        lsl   x1, x1, #3             /* x1 := Slot offset */
+        str   x2, [x4, x1]
+
+        ldr   x4, =boot_first_id     /* Next level into boot_first_id */
+        add   x4, x4, x20            /* x4 := paddr(boot_first_id) */
+
+        lsr   x1, x19, #FIRST_SHIFT  /* x1 := Offset of base paddr in boot_first_id */
+        lsl   x2, x1, #FIRST_SHIFT   /* x2 := Base address for 1GB mapping */
+        mov   x3, #PT_MEM            /* x2 := Section map */
         orr   x2, x2, x3
         and   x1, x1, #LPAE_ENTRY_MASK /* x1 := Slot offset */
         lsl   x1, x1, #3
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index fdc7c98..0a243b0 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -68,6 +68,7 @@ struct domain *dom_xen, *dom_io, *dom_cow;
 lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
 #ifdef CONFIG_ARM_64
 lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
+lpae_t boot_first_id[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
 #endif
 lpae_t boot_second[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
 lpae_t boot_third[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
@@ -491,6 +492,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
 #ifdef CONFIG_ARM_64
     memset(boot_first, 0x0, PAGE_SIZE);
     clean_and_invalidate_xen_dcache(boot_first);
+    memset(boot_first_id, 0x0, PAGE_SIZE);
+    clean_and_invalidate_xen_dcache(boot_first_id);
 #endif
     memset(boot_second, 0x0, PAGE_SIZE);
     clean_and_invalidate_xen_dcache(boot_second);
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v3 4/6] xen: arm: avoid unnecessary additional mappings in boot page tables.
  2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
  2014-07-21 12:59   ` [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
  2014-07-21 12:59   ` [PATCH v3 3/6] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
@ 2014-07-21 12:59   ` Ian Campbell
  2014-07-21 12:59   ` [PATCH v3 5/6] xen: arm: ensure that the boot code is <4K in size Ian Campbell
  2014-07-21 13:00   ` [PATCH v3 6/6] xen: arm: Correctly use GLOBAL/ENTRY in head.S, avoid .global Ian Campbell
  4 siblings, 0 replies; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 12:59 UTC (permalink / raw)
  To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini

If the identity map is created at one level then avoid creating
entries further down the boot page tables, since these will be aliases at
strange virtual addresses.

For example consider an arm32 system (for simplicity) with Xen loaded at
address 0x40402000. As a virtual address this corresponds to walking offsets 1,
2 and 2 at the first, second and third levels respectively.

When creating the identity map we will therefore create a 1GB super mapping at
0x40000000 for the identity map, which is the one we want to use.

However when considering the second level we will see the offset 2 and create a
2MB mapping in slot 2 of boot_second. Since boot_second is mapped in slot 0 of
boot_first this corresponds to an unwanted mapping from virtual address
0x00400000 to physical address 0x40400000.

We still do not handle the case where the load address is within the 2MB range
starting just after XEN_VIRT_START. This is not a regression but this patch
tries to provide a more useful diagnostic message. We do handle loading at
exactly XEN_VIRT_START.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
---
v2: Expanded on commit message.
    Handle/accept being loaded at exactly 2M
---
 xen/arch/arm/arm32/head.S |   20 +++++++++++++++++---
 xen/arch/arm/arm64/head.S |   19 ++++++++++++++++---
 2 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 9bc893f..afb4c6d 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -45,7 +45,7 @@
  *   r3  -
  *   r4  -
  *   r5  -
- *   r6  -
+ *   r6  - identity map in place
  *   r7  - CPUID
  *   r8  - DTB address (boot CPU only)
  *   r9  - paddr(start)
@@ -250,6 +250,14 @@ cpu_init_done:
          * mapping. So each CPU must rebuild the page tables here with
          * the 1:1 in place. */
 
+        /* If Xen is loaded at exactly XEN_VIRT_START then we don't
+         * need an additional 1:1 mapping, the virtual mapping will
+         * suffice.
+         */
+        cmp   r9, #XEN_VIRT_START
+        moveq r6, #1                 /* r6 := identity map now in place */
+        movne r6, #0                 /* r6 := identity map not yet in place */
+
         /* Write Xen's PT's paddr into the HTTBR */
         ldr   r4, =boot_pgtable
         add   r4, r4, r10            /* r4 := paddr (boot_pagetable) */
@@ -275,6 +283,7 @@ cpu_init_done:
         orr   r2, r2, #PT_LOWER(MEM)
         lsl   r1, r1, #3             /* r1 := Slot offset */
         strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
+        mov   r6, #1                 /* r6 := identity map now in place */
 
 1:      /* Setup boot_second: */
         ldr   r4, =boot_second
@@ -290,6 +299,8 @@ cpu_init_done:
         strd  r2, r3, [r4, #8]       /* Map it in slot 1 */
 
         /* ... map of paddr(start) in boot_second */
+        cmp   r6, #1                 /* r6 is set if already created */
+        beq   1f
         lsr   r2, r9, #SECOND_SHIFT  /* Offset of base paddr in boot_second */
         ldr   r3, =LPAE_ENTRY_MASK
         and   r1, r2, r3
@@ -302,6 +313,7 @@ cpu_init_done:
         mov   r3, #0x0
         lsl   r1, r1, #3             /* r1 := Slot offset */
         strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
+        mov   r6, #1                 /* r6 := identity map now in place */
 
         /* Setup boot_third: */
 1:      ldr   r4, =boot_third
@@ -326,8 +338,10 @@ cpu_init_done:
 
         /* boot pagetable setup complete */
 
-        b     1f
-
+        cmp   r6, #1                /* Did we manage to create an identity mapping ? */
+        beq   1f
+        PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
+        b     fail
 virtphys_clash:
         /* Identity map clashes with boot_third, which we cannot handle yet */
         PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 2412fe8..4e22374 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -62,7 +62,7 @@
  *  x22 - is_secondary_cpu
  *  x23 - UART address
  *  x24 - cpuid
- *  x25 -
+ *  x25 - identity map in place
  *  x26 -
  *  x27 -
  *  x28 -
@@ -253,6 +253,13 @@ skip_bss:
          * mapping. So each CPU must rebuild the page tables here with
          * the 1:1 in place. */
 
+        /* If Xen is loaded at exactly XEN_VIRT_START then we don't
+         * need an additional 1:1 mapping, the virtual mapping will
+         * suffice.
+         */
+        cmp   x19, #XEN_VIRT_START
+        cset  x25, eq                /* x25 := identity map in place, or not */
+
         /* Write Xen's PT's paddr into TTBR0_EL2 */
         ldr   x4, =boot_pgtable
         add   x4, x4, x20            /* x4 := paddr (boot_pagetable) */
@@ -293,6 +300,7 @@ skip_bss:
         and   x1, x1, #LPAE_ENTRY_MASK /* x1 := Slot offset */
         lsl   x1, x1, #3
         str   x2, [x4, x1]           /* Mapping of paddr(start) */
+        mov   x25, #1                /* x25 := identity map now in place */
 
 1:      /* Setup boot_first: */
         ldr   x4, =boot_first        /* Next level into boot_first */
@@ -306,6 +314,7 @@ skip_bss:
         str   x2, [x4, #0]           /* Map it in slot 0 */
 
         /* ... map of paddr(start) in boot_first */
+        cbnz  x25, 1f                /* x25 is set if already created */
         lsr   x2, x19, #FIRST_SHIFT  /* x2 := Offset of base paddr in boot_first */
         and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
         cbz   x1, 1f                 /* It's in slot 0, map in boot_second */
@@ -315,6 +324,7 @@ skip_bss:
         orr   x2, x2, x3
         lsl   x1, x1, #3             /* x1 := Slot offset */
         str   x2, [x4, x1]           /* Create mapping of paddr(start)*/
+        mov   x25, #1                /* x25 := identity map now in place */
 
 1:      /* Setup boot_second: */
         ldr   x4, =boot_second       /* Next level into boot_second */
@@ -328,6 +338,7 @@ skip_bss:
         str   x2, [x4, #8]           /* Map it in slot 1 */
 
         /* ... map of paddr(start) in boot_second */
+        cbnz  x25, 1f                /* x25 is set if already created */
         lsr   x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
         and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
         cmp   x1, #1
@@ -338,6 +349,7 @@ skip_bss:
         orr   x2, x2, x3
         lsl   x1, x1, #3             /* x1 := Slot offset */
         str   x2, [x4, x1]           /* Create mapping of paddr(start)*/
+        mov   x25, #1                /* x25 := identity map now in place */
 
 1:      /* Setup boot_third: */
         ldr   x4, =boot_third
@@ -361,8 +373,9 @@ skip_bss:
 
         /* boot pagetable setup complete */
 
-        b     1f
-
+        cbnz  x25, 1f                /* Did we manage to create an identity mapping ? */
+        PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
+        b     fail
 virtphys_clash:
         /* Identity map clashes with boot_third, which we cannot handle yet */
         PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v3 5/6] xen: arm: ensure that the boot code is <4K in size
  2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
                     ` (2 preceding siblings ...)
  2014-07-21 12:59   ` [PATCH v3 4/6] xen: arm: avoid unnecessary additional " Ian Campbell
@ 2014-07-21 12:59   ` Ian Campbell
  2014-07-21 13:00   ` [PATCH v3 6/6] xen: arm: Correctly use GLOBAL/ENTRY in head.S, avoid .global Ian Campbell
  4 siblings, 0 replies; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 12:59 UTC (permalink / raw)
  To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini

This avoids having to deal with the 1:1 boot mapping crossing a
section or page boundary.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
---
v3: Use GLOBAL()
v2: New patch #5/4
---
 xen/arch/arm/arm32/head.S |    2 ++
 xen/arch/arm/arm64/head.S |    2 ++
 xen/arch/arm/xen.lds.S    |    6 ++++++
 3 files changed, 10 insertions(+)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index afb4c6d..14b172f 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -457,6 +457,8 @@ fail:   PRINT("- Boot failed -\r\n")
 1:      wfe
         b     1b
 
+GLOBAL(_end_boot)
+
 /* Copy Xen to new location and switch TTBR
  * r1:r0       ttbr
  * r2          source address
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 4e22374..9497ca1 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -487,6 +487,8 @@ fail:   PRINT("- Boot failed -\r\n")
 1:      wfe
         b     1b
 
+GLOBAL(_end_boot)
+
 /* Copy Xen to new location and switch TTBR
  * x0    ttbr
  * x1    source address
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index be55dad..079e085 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -178,3 +178,9 @@ SECTIONS
   .stab.indexstr 0 : { *(.stab.indexstr) }
   .comment 0 : { *(.comment) }
 }
+
+/*
+ * We require that Xen is loaded at a 4K boundary, so this ensures that any
+ * code running on the boot time identity map cannot cross a section boundary.
+ */
+ASSERT( _end_boot - start <= PAGE_SIZE, "Boot code is larger than 4K")
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH v3 6/6] xen: arm: Correctly use GLOBAL/ENTRY in head.S, avoid .global
  2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
                     ` (3 preceding siblings ...)
  2014-07-21 12:59   ` [PATCH v3 5/6] xen: arm: ensure that the boot code is <4K in size Ian Campbell
@ 2014-07-21 13:00   ` Ian Campbell
  2014-07-21 14:18     ` Julien Grall
  4 siblings, 1 reply; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 13:00 UTC (permalink / raw)
  To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini

Use ENTRY() for function entry points since it ensures correct
alignment where GLOBAL() doesn't. The exception is the initial start
label which must be at offset 0, so just use GLOBAL() to avoid the
possibility of realignment.

Since everything happens to already be aligned there should be no
difference to the actual binary. objdump agrees.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v3: New patch
---
 xen/arch/arm/arm32/head.S |    8 +++-----
 xen/arch/arm/arm64/head.S |    8 +++-----
 2 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 14b172f..6573a42 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -76,8 +76,7 @@
          * It should be linked at XEN_VIRT_START, and loaded at any
          * 4K-aligned address.  All of text+data+bss must fit in 2MB,
          * or the initial pagetable code below will need adjustment. */
-        .global start
-start:
+GLOBAL(start)
         /* zImage magic header, see:
          * http://www.simtec.co.uk/products/SWLINUX/files/booting_article.html#d0e309
          */
@@ -583,16 +582,15 @@ hex:    .ascii "0123456789abcdef"
 
 #else  /* CONFIG_EARLY_PRINTK */
 
+ENTRY(early_puts)
 init_uart:
-.global early_puts
-early_puts:
 puts:
 putn:   mov   pc, lr
 
 #endif /* !CONFIG_EARLY_PRINTK */
 
 /* This provides a C-API version of __lookup_processor_type */
-GLOBAL(lookup_processor_type)
+ENTRY(lookup_processor_type)
         stmfd sp!, {r4, r10, lr}
         mov   r10, #0                   /* r10 := offset between virt&phys */
         bl    __lookup_processor_type
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 9497ca1..99cc6e0 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -100,8 +100,7 @@
          * or the initial pagetable code below will need adjustment.
          */
 
-        .global start
-start:
+GLOBAL(start)
         /*
          * DO NOT MODIFY. Image header expected by Linux boot-loaders.
          */
@@ -604,9 +603,8 @@ hex:    .ascii "0123456789abcdef"
 
 #else  /* CONFIG_EARLY_PRINTK */
 
+ENTRY(early_puts)
 init_uart:
-.global early_puts
-early_puts:
 puts:
 putn:   ret
 
@@ -615,7 +613,7 @@ putn:   ret
 /* This provides a C-API version of __lookup_processor_type
  * TODO: For now, the implementation return NULL every time
  */
-GLOBAL(lookup_processor_type)
+ENTRY(lookup_processor_type)
         mov  x0, #0
         ret
 
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address.
  2014-07-21 12:59   ` [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
@ 2014-07-21 13:20     ` Tim Deegan
  2014-07-21 13:29       ` Ian Campbell
  2014-07-21 14:16     ` Julien Grall
  1 sibling, 1 reply; 13+ messages in thread
From: Tim Deegan @ 2014-07-21 13:20 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, julien.grall, xen-devel

At 13:59 +0100 on 21 Jul (1405947596), Ian Campbell wrote:
> +virtphys_clash:
> +        /* Identity map clashes with boot_third, which we cannot handle yet */
> +        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")

Please tag this string "- Like so -\r\n" to match the other early output
from the boot assembler.

Presumably we could get around this by making a 4k mapping when the 2M
mapping would clash (at the cost of some extra code)?  Not suggesting
that it's necessary in this version.

Tim.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address.
  2014-07-21 13:20     ` Tim Deegan
@ 2014-07-21 13:29       ` Ian Campbell
  2014-07-21 13:38         ` Tim Deegan
  0 siblings, 1 reply; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 13:29 UTC (permalink / raw)
  To: Tim Deegan; +Cc: stefano.stabellini, julien.grall, xen-devel

On Mon, 2014-07-21 at 15:20 +0200, Tim Deegan wrote:
> At 13:59 +0100 on 21 Jul (1405947596), Ian Campbell wrote:
> > +virtphys_clash:
> > +        /* Identity map clashes with boot_third, which we cannot handle yet */
> > +        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
> 
> Please tag this string "- Like so -\r\n" to match the other early output
> from the boot assembler.

OK. (I may do it on commit unless other comments necessitate a resend)

> Presumably we could get around this by making a 4k mapping when the 2M
> mapping would clash (at the cost of some extra code)?  

It might be workable.

We'd have to steal the slot from boot_third to point to 4K worth of 1:1
mapping instead of the expected 4K of virtual mapping and then put back
the virt mapping right after switching to the virtually mapped PC before
we touch anything mapped via that slot i.e. before touching anything
after _end_boot. Doing it at the same time as we establish the fixmap
and dtb mappings would do the trick, I think.

I'd previously concluded this wasn't possible, but don't recall why.
What am I missing this time?

> Not suggesting
> that it's necessary in this version.

Phew!

Ian.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address.
  2014-07-21 13:29       ` Ian Campbell
@ 2014-07-21 13:38         ` Tim Deegan
  2014-07-21 13:46           ` Ian Campbell
  0 siblings, 1 reply; 13+ messages in thread
From: Tim Deegan @ 2014-07-21 13:38 UTC (permalink / raw)
  To: Ian Campbell; +Cc: stefano.stabellini, julien.grall, xen-devel

At 14:29 +0100 on 21 Jul (1405949358), Ian Campbell wrote:
> On Mon, 2014-07-21 at 15:20 +0200, Tim Deegan wrote:
> > At 13:59 +0100 on 21 Jul (1405947596), Ian Campbell wrote:
> > > +virtphys_clash:
> > > +        /* Identity map clashes with boot_third, which we cannot handle yet */
> > > +        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
> > 
> > Please tag this string "- Like so -\r\n" to match the other early output
> > from the boot assembler.
> 
> OK. (I may do it on commit unless other comments necessitate a resend)

Sure.

> > Presumably we could get around this by making a 4k mapping when the 2M
> > mapping would clash (at the cost of some extra code)?  
> 
> It might be workable.
> 
> We'd have to steal the slot from boot_third to point to 4K worth of 1:1
> mapping instead of the expected 4K of virtual mapping and then put back
> the virt mapping right after switching to the virtually mapped PC before
> we touch anything mapped via that slot i.e. before touching anything
> after _end_boot. Doing it at the same time as we establish the fixmap
> and dtb mappings would do the trick, I think.
> 
> I'd previously concluded this wasn't possible, but don't recall why.
> What am I missing this time?

We'd be in trouble if we used the slot that covers the boot_third
table itself.  We could work around that, e.g. by using a linear PT to
update the mapping, but it's getting a bit intricate.  At that point
I'd be inclined to look at ways to get up into C while still PIC and
doing the pagetable build & trampoline from there.

Tim.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address.
  2014-07-21 13:38         ` Tim Deegan
@ 2014-07-21 13:46           ` Ian Campbell
  0 siblings, 0 replies; 13+ messages in thread
From: Ian Campbell @ 2014-07-21 13:46 UTC (permalink / raw)
  To: Tim Deegan; +Cc: stefano.stabellini, julien.grall, xen-devel

On Mon, 2014-07-21 at 15:38 +0200, Tim Deegan wrote:
> At 14:29 +0100 on 21 Jul (1405949358), Ian Campbell wrote:
> > On Mon, 2014-07-21 at 15:20 +0200, Tim Deegan wrote:
> > > At 13:59 +0100 on 21 Jul (1405947596), Ian Campbell wrote:
> > > > +virtphys_clash:
> > > > +        /* Identity map clashes with boot_third, which we cannot handle yet */
> > > > +        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
> > > 
> > > Please tag this string "- Like so -\r\n" to match the other early output
> > > from the boot assembler.
> > 
> > OK. (I may do it on commit unless other comments necessitate a resend)
> 
> Sure.
> 
> > > Presumably we could get around this by making a 4k mapping when the 2M
> > > mapping would clash (at the cost of some extra code)?  
> > 
> > It might be workable.
> > 
> > We'd have to steal the slot from boot_third to point to 4K worth of 1:1
> > mapping instead of the expected 4K of virtual mapping and then put back
> > the virt mapping right after switching to the virtually mapped PC before
> > we touch anything mapped via that slot i.e. before touching anything
> > after _end_boot. Doing it at the same time as we establish the fixmap
> > and dtb mappings would do the trick, I think.
> > 
> > I'd previously concluded this wasn't possible, but don't recall why.
> > What am I missing this time?
> 
> We'd be in trouble if we used the slot that covers the boot_third
> table itself.  We could work around that, e.g. by using a linear PT to
> update the mapping, but it's getting a bit intricate.

That's not the issue I foresaw, but it is an issue.

A linear PT would be a problem if the virt address we picked clashed
with the load address.

We might be able to get away with mapping boot_third twice, once at its
normal virt address and again at some address just after _end, then
using whichever one doesn't clash with the 1:1.

>   At that point
> I'd be inclined to look at ways to get up into C while still PIC and
> doing the pagetable build & trampoline from there.

Having played with this for grub purposes I don't really trust -fPIC
very much on ARM. Although that was trying to be doubly clever so maybe
it's OK for more traditional uses.

Ian.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address.
  2014-07-21 12:59   ` [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
  2014-07-21 13:20     ` Tim Deegan
@ 2014-07-21 14:16     ` Julien Grall
  1 sibling, 0 replies; 13+ messages in thread
From: Julien Grall @ 2014-07-21 14:16 UTC (permalink / raw)
  To: Ian Campbell, xen-devel; +Cc: tim, stefano.stabellini

Hi Ian,

On 07/21/2014 01:59 PM, Ian Campbell wrote:
> Currently the boot page tables map Xen at XEN_VIRT_START using a 2MB section
> mapping. This means that the bootloader must load Xen at a 2MB aligned address.
> Unfortunately this is not the case with UEFI on the Juno platform where Xen
> fails to boot. Furthermore the Linux boot protocol (which Xen claims to adhere
> to) does not have this restriction, therefore this is our bug and not the
> bootloader's.
> 
> Fix this by adding third level pagetables to the boot time pagetables, allowing
> us to map a Xen which is aligned only to a 4K boundary. This only affects the
> boot time page tables since Xen will later relocate itself to a 2MB aligned
> address. Strictly speaking the non-boot processors could make use of this and
> use a section mapping, but it is simpler if all processors follow the same boot
> path.
> 
> Strictly speaking the Linux boot protocol doesn't even require 4K alignment
> (and apparently Linux can cope with this), but so far all bootloaders appear to
> provide it, so support for this is left for another day.
> 
> In order to use LPAE_ENTRIES in head.S we need to define it in an asm friendly
> way.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>

Acked-by: Julien Grall <julien.grall@linaro.org>

Regards,

> ---
> v3: Use LPAE_ENTRY_MASK.
>     Use "ldr rX, =XXX" to avoid opencoding the construction of the
>     const.
>     Update comment at start to reflect this change
> v2: Use LPAE_ENTRIES and PAGE_SIZE
>     Minor updates to the asm
> 
> update comment
> ---
>  xen/arch/arm/arm32/head.S  |   62 ++++++++++++++++++++++++++++++++------------
>  xen/arch/arm/arm64/head.S  |   62 +++++++++++++++++++++++++++++++-------------
>  xen/arch/arm/mm.c          |    8 ++++--
>  xen/include/asm-arm/page.h |    2 +-
>  4 files changed, 97 insertions(+), 37 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 51501dc..9bc893f 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -26,6 +26,7 @@
>  
>  #define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
>  #define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
> +#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
>  #define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
>  #define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
>  
> @@ -73,7 +74,7 @@
>  
>          /* This must be the very first address in the loaded image.
>           * It should be linked at XEN_VIRT_START, and loaded at any
> -         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
> +         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
>           * or the initial pagetable code below will need adjustment. */
>          .global start
>  start:
> @@ -258,11 +259,11 @@ cpu_init_done:
>          /* Setup boot_pgtable: */
>          ldr   r1, =boot_second
>          add   r1, r1, r10            /* r1 := paddr (boot_second) */
> -        mov   r3, #0x0
>  
>          /* ... map boot_second in boot_pgtable[0] */
>          orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_second */
>          orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
> +        mov   r3, #0x0
>          strd  r2, r3, [r4, #0]       /* Map it in slot 0 */
>  
>          /* ... map of paddr(start) in boot_pgtable */
> @@ -279,31 +280,60 @@ cpu_init_done:
>          ldr   r4, =boot_second
>          add   r4, r4, r10            /* r4 := paddr (boot_second) */
>  
> -        lsr   r2, r9, #SECOND_SHIFT  /* Base address for 2MB mapping */
> -        lsl   r2, r2, #SECOND_SHIFT
> +        ldr   r1, =boot_third
> +        add   r1, r1, r10            /* r1 := paddr (boot_third) */
> +
> +        /* ... map boot_third in boot_second[1] */
> +        orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_third */
> +        orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
> +        mov   r3, #0x0
> +        strd  r2, r3, [r4, #8]       /* Map it in slot 1 */
> +
> +        /* ... map of paddr(start) in boot_second */
> +        lsr   r2, r9, #SECOND_SHIFT  /* Offset of base paddr in boot_second */
> +        ldr   r3, =LPAE_ENTRY_MASK
> +        and   r1, r2, r3
> +        cmp   r1, #1
> +        beq   virtphys_clash         /* It's in slot 1, which we cannot handle */
> +
> +        lsl   r2, r2, #SECOND_SHIFT  /* Base address for 2MB mapping */
>          orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
>          orr   r2, r2, #PT_LOWER(MEM)
> +        mov   r3, #0x0
> +        lsl   r1, r1, #3             /* r1 := Slot offset */
> +        strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
>  
> -        /* ... map of vaddr(start) in boot_second */
> -        ldr   r1, =start
> -        lsr   r1, #(SECOND_SHIFT - 3)   /* Slot for vaddr(start) */
> -        strd  r2, r3, [r4, r1]       /* Map vaddr(start) */
> +        /* Setup boot_third: */
> +1:      ldr   r4, =boot_third
> +        add   r4, r4, r10            /* r4 := paddr (boot_third) */
>  
> -        /* ... map of paddr(start) in boot_second */
> -        lsrs  r1, r9, #30            /* Base paddr */
> -        bne   1f                     /* If paddr(start) is not in slot 0
> -                                      * then the mapping was done in
> -                                      * boot_pgtable above */
> +        lsr   r2, r9, #THIRD_SHIFT  /* Base address for 4K mapping */
> +        lsl   r2, r2, #THIRD_SHIFT
> +        orr   r2, r2, #PT_UPPER(MEM_L3) /* r2:r3 := map */
> +        orr   r2, r2, #PT_LOWER(MEM_L3)
> +        mov   r3, #0x0
>  
> -        mov   r1, r9, lsr #(SECOND_SHIFT - 3)   /* Slot for paddr(start) */
> -        strd  r2, r3, [r4, r1]       /* Map Xen there */
> -1:
> +        /* ... map of vaddr(start) in boot_third */
> +        mov   r1, #0
> +1:      strd  r2, r3, [r4, r1]       /* Map vaddr(start) */
> +        add   r2, r2, #PAGE_SIZE     /* Next page */
> +        add   r1, r1, #8             /* Next slot */
> +        cmp   r1, #(LPAE_ENTRIES<<3) /* 512*8-byte entries per page */
> +        blo   1b
>  
>          /* Defer fixmap and dtb mapping until after paging enabled, to
>           * avoid them clashing with the 1:1 mapping. */
>  
>          /* boot pagetable setup complete */
>  
> +        b     1f
> +
> +virtphys_clash:
> +        /* Identity map clashes with boot_third, which we cannot handle yet */
> +        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
> +        b     fail
> +
> +1:
>          PRINT("- Turning on paging -\r\n")
>  
>          ldr   r1, =paging            /* Explicit vaddr, not RIP-relative */
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index d46481b..7c04e5b 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -27,6 +27,7 @@
>  
>  #define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
>  #define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
> +#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
>  #define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
>  #define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
>  
> @@ -95,7 +96,7 @@
>           *
>           * This must be the very first address in the loaded image.
>           * It should be linked at XEN_VIRT_START, and loaded at any
> -         * 2MB-aligned address.  All of text+data+bss must fit in 2MB,
> +         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
>           * or the initial pagetable code below will need adjustment.
>           */
>  
> @@ -274,8 +275,9 @@ skip_bss:
>          lsl   x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
>          mov   x3, #PT_MEM            /* x2 := Section mapping */
>          orr   x2, x2, x3
> -        lsl   x1, x1, #3             /* x1 := Slot offset */
> -        str   x2, [x4, x1]           /* Mapping of paddr(start)*/
> +        and   x1, x1, #LPAE_ENTRY_MASK /* x1 := Slot offset */
> +        lsl   x1, x1, #3
> +        str   x2, [x4, x1]           /* Mapping of paddr(start) */
>  
>  1:      /* Setup boot_first: */
>          ldr   x4, =boot_first        /* Next level into boot_first */
> @@ -290,7 +292,7 @@ skip_bss:
>  
>          /* ... map of paddr(start) in boot_first */
>          lsr   x2, x19, #FIRST_SHIFT  /* x2 := Offset of base paddr in boot_first */
> -        and   x1, x2, 0x1ff          /* x1 := Slot to use */
> +        and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
>          cbz   x1, 1f                 /* It's in slot 0, map in boot_second */
>  
>          lsl   x2, x2, #FIRST_SHIFT   /* Base address for 1GB mapping */
> @@ -303,31 +305,55 @@ skip_bss:
>          ldr   x4, =boot_second       /* Next level into boot_second */
>          add   x4, x4, x20            /* x4 := paddr(boot_second) */
>  
> -        lsr   x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
> -        lsl   x2, x2, #SECOND_SHIFT
> +        /* ... map boot_third in boot_second[1] */
> +        ldr   x1, =boot_third
> +        add   x1, x1, x20            /* x1 := paddr(boot_third) */
> +        mov   x3, #PT_PT             /* x2 := table map of boot_third */
> +        orr   x2, x1, x3             /*       + rights for linear PT */
> +        str   x2, [x4, #8]           /* Map it in slot 1 */
> +
> +        /* ... map of paddr(start) in boot_second */
> +        lsr   x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
> +        and   x1, x2, #LPAE_ENTRY_MASK /* x1 := Slot to use */
> +        cmp   x1, #1
> +        b.eq  virtphys_clash         /* It's in slot 1, which we cannot handle */
> +
> +        lsl   x2, x2, #SECOND_SHIFT  /* Base address for 2MB mapping */
>          mov   x3, #PT_MEM            /* x2 := Section map */
>          orr   x2, x2, x3
> +        lsl   x1, x1, #3             /* x1 := Slot offset */
> +        str   x2, [x4, x1]           /* Create mapping of paddr(start)*/
>  
> -        /* ... map of vaddr(start) in boot_second */
> -        ldr   x1, =start
> -        lsr   x1, x1, #(SECOND_SHIFT - 3)   /* Slot for vaddr(start) */
> -        str   x2, [x4, x1]           /* Map vaddr(start) */
> +1:      /* Setup boot_third: */
> +        ldr   x4, =boot_third
> +        add   x4, x4, x20            /* x4 := paddr (boot_third) */
>  
> -        /* ... map of paddr(start) in boot_second */
> -        lsr   x1, x19, #FIRST_SHIFT  /* Base paddr */
> -        cbnz  x1, 1f                 /* If paddr(start) is not in slot 0
> -                                      * then the mapping was done in
> -                                      * boot_pgtable or boot_first above */
> +        lsr   x2, x19, #THIRD_SHIFT  /* Base address for 4K mapping */
> +        lsl   x2, x2, #THIRD_SHIFT
> +        mov   x3, #PT_MEM_L3         /* x2 := Section map */
> +        orr   x2, x2, x3
>  
> -        lsr   x1, x19, #(SECOND_SHIFT - 3)  /* Slot for paddr(start) */
> -        str   x2, [x4, x1]           /* Map Xen there */
> -1:
> +        /* ... map of vaddr(start) in boot_third */
> +        mov   x1, xzr
> +1:      str   x2, [x4, x1]           /* Map vaddr(start) */
> +        add   x2, x2, #PAGE_SIZE     /* Next page */
> +        add   x1, x1, #8             /* Next slot */
> +        cmp   x1, #(LPAE_ENTRIES<<3) /* 512 entries per page */
> +        b.lt  1b
>  
>          /* Defer fixmap and dtb mapping until after paging enabled, to
>           * avoid them clashing with the 1:1 mapping. */
>  
>          /* boot pagetable setup complete */
>  
> +        b     1f
> +
> +virtphys_clash:
> +        /* Identity map clashes with boot_third, which we cannot handle yet */
> +        PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
> +        b     fail
> +
> +1:
>          PRINT("- Turning on paging -\r\n")
>  
>          ldr   x1, =paging            /* Explicit vaddr, not RIP-relative */
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 03a0533..fdc7c98 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -47,8 +47,9 @@ struct domain *dom_xen, *dom_io, *dom_cow;
>   * to the CPUs own pagetables.
>   *
>   * These pagetables have a very simple structure. They include:
> - *  - a 2MB mapping of xen at XEN_VIRT_START, boot_first and
> - *    boot_second are used to populate the trie down to that mapping.
> + *  - 2MB worth of 4K mappings of xen at XEN_VIRT_START, boot_first and
> + *    boot_second are used to populate the tables down to boot_third
> + *    which contains the actual mapping.
>   *  - a 1:1 mapping of xen at its current physical address. This uses a
>   *    section mapping at whichever of boot_{pgtable,first,second}
>   *    covers that physical address.
> @@ -69,6 +70,7 @@ lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
>  lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
>  #endif
>  lpae_t boot_second[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
> +lpae_t boot_third[LPAE_ENTRIES]  __attribute__((__aligned__(4096)));
>  
>  /* Main runtime page tables */
>  
> @@ -492,6 +494,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
>  #endif
>      memset(boot_second, 0x0, PAGE_SIZE);
>      clean_and_invalidate_xen_dcache(boot_second);
> +    memset(boot_third, 0x0, PAGE_SIZE);
> +    clean_and_invalidate_xen_dcache(boot_third);
>  
>      /* Break up the Xen mapping into 4k pages and protect them separately. */
>      for ( i = 0; i < LPAE_ENTRIES; i++ )
> diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
> index 113be5a..739038a 100644
> --- a/xen/include/asm-arm/page.h
> +++ b/xen/include/asm-arm/page.h
> @@ -396,7 +396,7 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
>   */
>  
>  #define LPAE_SHIFT      9
> -#define LPAE_ENTRIES    (1u << LPAE_SHIFT)
> +#define LPAE_ENTRIES    (_AC(1,U) << LPAE_SHIFT)
>  #define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1)
>  
>  #define THIRD_SHIFT    (PAGE_SHIFT)
> 


-- 
Julien Grall

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 6/6] xen: arm: Correctly use GLOBAL/ENTRY in head.S, avoid .global
  2014-07-21 13:00   ` [PATCH v3 6/6] xen: arm: Correctly use GLOBAL/ENTRY in head.S, avoid .global Ian Campbell
@ 2014-07-21 14:18     ` Julien Grall
  0 siblings, 0 replies; 13+ messages in thread
From: Julien Grall @ 2014-07-21 14:18 UTC (permalink / raw)
  To: Ian Campbell, xen-devel; +Cc: tim, stefano.stabellini

Hi Ian,

On 07/21/2014 02:00 PM, Ian Campbell wrote:
> Use ENTRY() for function entry points since it ensures correct
> alignment where GLOBAL() doesn't. The exception is the initial start
> label which must be at offset 0, so just use GLOBAL() to avoid the
> possibility of realignment.
> 
> Since everything happens to already be aligned there should be no
> difference to the actual binary. objdump agrees.
> 
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>

Regards,

> ---
> v3: New patch
> ---
>  xen/arch/arm/arm32/head.S |    8 +++-----
>  xen/arch/arm/arm64/head.S |    8 +++-----
>  2 files changed, 6 insertions(+), 10 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 14b172f..6573a42 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -76,8 +76,7 @@
>           * It should be linked at XEN_VIRT_START, and loaded at any
>           * 4K-aligned address.  All of text+data+bss must fit in 2MB,
>           * or the initial pagetable code below will need adjustment. */
> -        .global start
> -start:
> +GLOBAL(start)
>          /* zImage magic header, see:
>           * http://www.simtec.co.uk/products/SWLINUX/files/booting_article.html#d0e309
>           */
> @@ -583,16 +582,15 @@ hex:    .ascii "0123456789abcdef"
>  
>  #else  /* CONFIG_EARLY_PRINTK */
>  
> +ENTRY(early_puts)
>  init_uart:
> -.global early_puts
> -early_puts:
>  puts:
>  putn:   mov   pc, lr
>  
>  #endif /* !CONFIG_EARLY_PRINTK */
>  
>  /* This provides a C-API version of __lookup_processor_type */
> -GLOBAL(lookup_processor_type)
> +ENTRY(lookup_processor_type)
>          stmfd sp!, {r4, r10, lr}
>          mov   r10, #0                   /* r10 := offset between virt&phys */
>          bl    __lookup_processor_type
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index 9497ca1..99cc6e0 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -100,8 +100,7 @@
>           * or the initial pagetable code below will need adjustment.
>           */
>  
> -        .global start
> -start:
> +GLOBAL(start)
>          /*
>           * DO NOT MODIFY. Image header expected by Linux boot-loaders.
>           */
> @@ -604,9 +603,8 @@ hex:    .ascii "0123456789abcdef"
>  
>  #else  /* CONFIG_EARLY_PRINTK */
>  
> +ENTRY(early_puts)
>  init_uart:
> -.global early_puts
> -early_puts:
>  puts:
>  putn:   ret
>  
> @@ -615,7 +613,7 @@ putn:   ret
>  /* This provides a C-API version of __lookup_processor_type
>   * TODO: For now, the implementation return NULL every time
>   */
> -GLOBAL(lookup_processor_type)
> +ENTRY(lookup_processor_type)
>          mov  x0, #0
>          ret
>  
> 


-- 
Julien Grall

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2014-07-21 14:18 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-07-21 12:58 [PATCH v3 0/6] xen: arm: various improvements to boot time page table handling Ian Campbell
2014-07-21 12:59 ` [PATCH v3 1/6] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
2014-07-21 12:59   ` [PATCH v3 2/6] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
2014-07-21 13:20     ` Tim Deegan
2014-07-21 13:29       ` Ian Campbell
2014-07-21 13:38         ` Tim Deegan
2014-07-21 13:46           ` Ian Campbell
2014-07-21 14:16     ` Julien Grall
2014-07-21 12:59   ` [PATCH v3 3/6] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
2014-07-21 12:59   ` [PATCH v3 4/6] xen: arm: avoid unnecessary additional " Ian Campbell
2014-07-21 12:59   ` [PATCH v3 5/6] xen: arm: ensure that the boot code is <4K in size Ian Campbell
2014-07-21 13:00   ` [PATCH v3 6/6] xen: arm: Correctly use GLOBAL/ENTRY in head.S, avoid .global Ian Campbell
2014-07-21 14:18     ` Julien Grall

This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).