* [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling
@ 2014-07-18 16:11 Ian Campbell
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
` (2 more replies)
0 siblings, 3 replies; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:11 UTC (permalink / raw)
To: xen-devel; +Cc: Stefano Stabellini, Julien Grall, Tim Deegan
Since v1 I've made it correctly handle being loaded at exactly 2M (so
virt == phys on boot) and made a few cleanups.
I've also tested with various offsets from the base of RAM:
0x00200000 (2M)
0x40000000 (1G)
0x40200000 (1G+2M)
0x80000000 (2G)
0x80400000 (2G+4M)
0x80401000 (2G+4M+4K)
On Midway (RAM starts at 0) and XGene (RAM starts at 256G). I think this
covers the interesting cases at each level except the zeroeth where I
have no hardware to test anything other than mappings in slot 0. I also
couldn't test virt==phys on 64 bit since I've no such systems with RAM
at 0.
Ian.
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S
2014-07-18 16:11 [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
@ 2014-07-18 16:12 ` Ian Campbell
2014-07-18 16:12 ` [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
` (2 more replies)
2014-07-18 16:16 ` [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
2014-07-18 16:57 ` [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size Ian Campbell
2 siblings, 3 replies; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:12 UTC (permalink / raw)
To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
---
xen/arch/arm/arm32/head.S | 11 +++++------
xen/arch/arm/arm64/head.S | 15 +++++++--------
2 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 73b97cb..51501dc 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -266,10 +266,10 @@ cpu_init_done:
strd r2, r3, [r4, #0] /* Map it in slot 0 */
/* ... map of paddr(start) in boot_pgtable */
- lsrs r1, r9, #30 /* Offset of base paddr in boot_pgtable */
+ lsrs r1, r9, #FIRST_SHIFT /* Offset of base paddr in boot_pgtable */
beq 1f /* If it is in slot 0 then map in boot_second
* later on */
- lsl r2, r1, #30 /* Base address for 1GB mapping */
+ lsl r2, r1, #FIRST_SHIFT /* Base address for 1GB mapping */
orr r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
orr r2, r2, #PT_LOWER(MEM)
lsl r1, r1, #3 /* r1 := Slot offset */
@@ -277,7 +277,7 @@ cpu_init_done:
1: /* Setup boot_second: */
ldr r4, =boot_second
- add r4, r4, r10 /* r1 := paddr (boot_second) */
+ add r4, r4, r10 /* r4 := paddr (boot_second) */
lsr r2, r9, #SECOND_SHIFT /* Base address for 2MB mapping */
lsl r2, r2, #SECOND_SHIFT
@@ -320,7 +320,7 @@ paging:
dsb
#if defined(CONFIG_EARLY_PRINTK) /* Fixmap is only used by early printk */
/* Non-boot CPUs don't need to rebuild the fixmap itself, just
- * the mapping from boot_second to xen_fixmap */
+ * the mapping from boot_second to xen_fixmap */
teq r12, #0
bne 1f
@@ -408,8 +408,7 @@ launch:
beq start_xen /* and disappear into the land of C */
b start_secondary /* (to the appropriate entry point) */
-/* Fail-stop
- * r0: string explaining why */
+/* Fail-stop */
fail: PRINT("- Boot failed -\r\n")
1: wfe
b 1b
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 7d53143..d46481b 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -267,11 +267,11 @@ skip_bss:
str x2, [x4, #0] /* Map it in slot 0 */
/* ... map of paddr(start) in boot_pgtable */
- lsr x1, x19, #39 /* Offset of base paddr in boot_pgtable */
+ lsr x1, x19, #ZEROETH_SHIFT/* Offset of base paddr in boot_pgtable */
cbz x1, 1f /* It's in slot 0, map in boot_first
* or boot_second later on */
- lsl x2, x1, #39 /* Base address for 512GB mapping */
+ lsl x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
mov x3, #PT_MEM /* x2 := Section mapping */
orr x2, x2, x3
lsl x1, x1, #3 /* x1 := Slot offset */
@@ -284,7 +284,7 @@ skip_bss:
/* ... map boot_second in boot_first[0] */
ldr x1, =boot_second
add x1, x1, x20 /* x1 := paddr(boot_second) */
- mov x3, #PT_PT /* x2 := table map of boot_first */
+ mov x3, #PT_PT /* x2 := table map of boot_second */
orr x2, x1, x3 /* + rights for linear PT */
str x2, [x4, #0] /* Map it in slot 0 */
@@ -300,8 +300,8 @@ skip_bss:
str x2, [x4, x1] /* Create mapping of paddr(start)*/
1: /* Setup boot_second: */
- ldr x4, =boot_second
- add x4, x4, x20 /* x4 := paddr (boot_second) */
+ ldr x4, =boot_second /* Next level into boot_second */
+ add x4, x4, x20 /* x4 := paddr(boot_second) */
lsr x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
lsl x2, x2, #SECOND_SHIFT
@@ -345,7 +345,7 @@ paging:
dsb sy
#if defined(CONFIG_EARLY_PRINTK) /* Fixmap is only used by early printk */
/* Non-boot CPUs don't need to rebuild the fixmap itself, just
- * the mapping from boot_second to xen_fixmap */
+ * the mapping from boot_second to xen_fixmap */
cbnz x22, 1f
/* Add UART to the fixmap table */
@@ -428,8 +428,7 @@ launch:
cbz x22, start_xen /* and disappear into the land of C */
b start_secondary /* (to the appropriate entry point) */
-/* Fail-stop
- * r0: string explaining why */
+/* Fail-stop */
fail: PRINT("- Boot failed -\r\n")
1: wfe
b 1b
--
1.7.10.4
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address.
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
@ 2014-07-18 16:12 ` Ian Campbell
2014-07-20 20:28 ` Julien Grall
2014-07-18 16:12 ` [PATCH v2 3/4] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
2014-07-18 16:12 ` [PATCH v2 4/4] xen: arm: avoid unnecessary additional " Ian Campbell
2 siblings, 1 reply; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:12 UTC (permalink / raw)
To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini
Currently the boot page tables map Xen at XEN_VIRT_START using a 2MB section
mapping. This means that the bootloader must load Xen at a 2MB aligned address.
Unfortunately this is not the case with UEFI on the Juno platform where Xen
fails to boot. Furthermore the Linux boot protocol (which Xen claims to adhere
to) does not have this restriction, therefore this is our bug and not the
bootloader's.
Fix this by adding third level pagetables to the boot time pagetables, allowing
us to map a Xen which is aligned only to a 4K boundary. This only affects the
boot time page tables since Xen will later relocate itself to a 2MB aligned
address. Strictly speaking the non-boot processors could make use of this and
use a section mapping, but it is simpler if all processors follow the same boot
path.
Strictly speaking the Linux boot protocol doesn't even require 4K alignment
(and apparently Linux can cope with this), but so far all bootloaders appear to
provide it, so support for this is left for another day.
In order to use LPAE_ENTRIES in head.S we need to define it in an asm friendly
way.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Use LPAE_ENTRIES and PAGE_SIZE
Minor updates to the asm
fixes, handle loading at 2M
---
xen/arch/arm/arm32/head.S | 61 +++++++++++++++++++++++++++++++++-----------
xen/arch/arm/arm64/head.S | 60 +++++++++++++++++++++++++++++++------------
xen/arch/arm/mm.c | 8 ++++--
xen/include/asm-arm/page.h | 2 +-
4 files changed, 96 insertions(+), 35 deletions(-)
diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 51501dc..0a76c2e 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -26,6 +26,7 @@
#define PT_PT 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_MEM 0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
@@ -258,11 +259,11 @@ cpu_init_done:
/* Setup boot_pgtable: */
ldr r1, =boot_second
add r1, r1, r10 /* r1 := paddr (boot_second) */
- mov r3, #0x0
/* ... map boot_second in boot_pgtable[0] */
orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of boot_second */
orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
+ mov r3, #0x0
strd r2, r3, [r4, #0] /* Map it in slot 0 */
/* ... map of paddr(start) in boot_pgtable */
@@ -279,31 +280,61 @@ cpu_init_done:
ldr r4, =boot_second
add r4, r4, r10 /* r4 := paddr (boot_second) */
- lsr r2, r9, #SECOND_SHIFT /* Base address for 2MB mapping */
- lsl r2, r2, #SECOND_SHIFT
+ ldr r1, =boot_third
+ add r1, r1, r10 /* r1 := paddr (boot_third) */
+
+ /* ... map boot_third in boot_second[1] */
+ orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of boot_third */
+ orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
+ mov r3, #0x0
+ strd r2, r3, [r4, #8] /* Map it in slot 1 */
+
+ /* ... map of paddr(start) in boot_second */
+ lsr r2, r9, #SECOND_SHIFT /* Offset of base paddr in boot_second */
+ mov r3, #0x0ff /* r2 := LPAE entries mask */
+ orr r3, r3, #0x100
+ and r1, r2, r3
+ cmp r1, #1
+ beq virtphys_clash /* It's in slot 1, which we cannot handle */
+
+ lsl r2, r2, #SECOND_SHIFT /* Base address for 2MB mapping */
orr r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
orr r2, r2, #PT_LOWER(MEM)
+ mov r3, #0x0
+ lsl r1, r1, #3 /* r1 := Slot offset */
+ strd r2, r3, [r4, r1] /* Mapping of paddr(start) */
- /* ... map of vaddr(start) in boot_second */
- ldr r1, =start
- lsr r1, #(SECOND_SHIFT - 3) /* Slot for vaddr(start) */
- strd r2, r3, [r4, r1] /* Map vaddr(start) */
+ /* Setup boot_third: */
+1: ldr r4, =boot_third
+ add r4, r4, r10 /* r4 := paddr (boot_third) */
- /* ... map of paddr(start) in boot_second */
- lsrs r1, r9, #30 /* Base paddr */
- bne 1f /* If paddr(start) is not in slot 0
- * then the mapping was done in
- * boot_pgtable above */
+ lsr r2, r9, #THIRD_SHIFT /* Base address for 4K mapping */
+ lsl r2, r2, #THIRD_SHIFT
+ orr r2, r2, #PT_UPPER(MEM_L3) /* r2:r3 := map */
+ orr r2, r2, #PT_LOWER(MEM_L3)
+ mov r3, #0x0
- mov r1, r9, lsr #(SECOND_SHIFT - 3) /* Slot for paddr(start) */
- strd r2, r3, [r4, r1] /* Map Xen there */
-1:
+ /* ... map of vaddr(start) in boot_third */
+ mov r1, #0
+1: strd r2, r3, [r4, r1] /* Map vaddr(start) */
+ add r2, r2, #PAGE_SIZE /* Next page */
+ add r1, r1, #8 /* Next slot */
+ cmp r1, #(LPAE_ENTRIES<<3) /* 512*8-byte entries per page */
+ blo 1b
/* Defer fixmap and dtb mapping until after paging enabled, to
* avoid them clashing with the 1:1 mapping. */
/* boot pagetable setup complete */
+ b 1f
+
+virtphys_clash:
+ /* Identity map clashes with boot_third, which we cannot handle yet */
+ PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
+ b fail
+
+1:
PRINT("- Turning on paging -\r\n")
ldr r1, =paging /* Explicit vaddr, not RIP-relative */
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index d46481b..5d7b2b5 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -27,6 +27,7 @@
#define PT_PT 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_MEM 0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
@@ -274,8 +275,9 @@ skip_bss:
lsl x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
mov x3, #PT_MEM /* x2 := Section mapping */
orr x2, x2, x3
- lsl x1, x1, #3 /* x1 := Slot offset */
- str x2, [x4, x1] /* Mapping of paddr(start)*/
+ and x1, x1, #0x1ff /* x1 := Slot offset */
+ lsl x1, x1, #3
+ str x2, [x4, x1] /* Mapping of paddr(start) */
1: /* Setup boot_first: */
ldr x4, =boot_first /* Next level into boot_first */
@@ -290,7 +292,7 @@ skip_bss:
/* ... map of paddr(start) in boot_first */
lsr x2, x19, #FIRST_SHIFT /* x2 := Offset of base paddr in boot_first */
- and x1, x2, 0x1ff /* x1 := Slot to use */
+ and x1, x2, #0x1ff /* x1 := Slot to use */
cbz x1, 1f /* It's in slot 0, map in boot_second */
lsl x2, x2, #FIRST_SHIFT /* Base address for 1GB mapping */
@@ -303,31 +305,55 @@ skip_bss:
ldr x4, =boot_second /* Next level into boot_second */
add x4, x4, x20 /* x4 := paddr(boot_second) */
- lsr x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
- lsl x2, x2, #SECOND_SHIFT
+ /* ... map boot_third in boot_second[1] */
+ ldr x1, =boot_third
+ add x1, x1, x20 /* x1 := paddr(boot_third) */
+ mov x3, #PT_PT /* x2 := table map of boot_third */
+ orr x2, x1, x3 /* + rights for linear PT */
+ str x2, [x4, #8] /* Map it in slot 1 */
+
+ /* ... map of paddr(start) in boot_second */
+ lsr x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
+ and x1, x2, 0x1ff /* x1 := Slot to use */
+ cmp x1, #1
+ b.eq virtphys_clash /* It's in slot 1, which we cannot handle */
+
+ lsl x2, x2, #SECOND_SHIFT /* Base address for 2MB mapping */
mov x3, #PT_MEM /* x2 := Section map */
orr x2, x2, x3
+ lsl x1, x1, #3 /* x1 := Slot offset */
+ str x2, [x4, x1] /* Create mapping of paddr(start)*/
- /* ... map of vaddr(start) in boot_second */
- ldr x1, =start
- lsr x1, x1, #(SECOND_SHIFT - 3) /* Slot for vaddr(start) */
- str x2, [x4, x1] /* Map vaddr(start) */
+1: /* Setup boot_third: */
+ ldr x4, =boot_third
+ add x4, x4, x20 /* x4 := paddr (boot_third) */
- /* ... map of paddr(start) in boot_second */
- lsr x1, x19, #FIRST_SHIFT /* Base paddr */
- cbnz x1, 1f /* If paddr(start) is not in slot 0
- * then the mapping was done in
- * boot_pgtable or boot_first above */
+ lsr x2, x19, #THIRD_SHIFT /* Base address for 4K mapping */
+ lsl x2, x2, #THIRD_SHIFT
+ mov x3, #PT_MEM_L3 /* x2 := Section map */
+ orr x2, x2, x3
- lsr x1, x19, #(SECOND_SHIFT - 3) /* Slot for paddr(start) */
- str x2, [x4, x1] /* Map Xen there */
-1:
+ /* ... map of vaddr(start) in boot_third */
+ mov x1, xzr
+1: str x2, [x4, x1] /* Map vaddr(start) */
+ add x2, x2, #PAGE_SIZE /* Next page */
+ add x1, x1, #8 /* Next slot */
+ cmp x1, #(LPAE_ENTRIES<<3) /* 512 entries per page */
+ b.lt 1b
/* Defer fixmap and dtb mapping until after paging enabled, to
* avoid them clashing with the 1:1 mapping. */
/* boot pagetable setup complete */
+ b 1f
+
+virtphys_clash:
+ /* Identity map clashes with boot_third, which we cannot handle yet */
+ PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
+ b fail
+
+1:
PRINT("- Turning on paging -\r\n")
ldr x1, =paging /* Explicit vaddr, not RIP-relative */
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 03a0533..fdc7c98 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -47,8 +47,9 @@ struct domain *dom_xen, *dom_io, *dom_cow;
* to the CPUs own pagetables.
*
* These pagetables have a very simple structure. They include:
- * - a 2MB mapping of xen at XEN_VIRT_START, boot_first and
- * boot_second are used to populate the trie down to that mapping.
+ * - 2MB worth of 4K mappings of xen at XEN_VIRT_START, boot_first and
+ * boot_second are used to populate the tables down to boot_third
+ * which contains the actual mapping.
* - a 1:1 mapping of xen at its current physical address. This uses a
* section mapping at whichever of boot_{pgtable,first,second}
* covers that physical address.
@@ -69,6 +70,7 @@ lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
#endif
lpae_t boot_second[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
+lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
/* Main runtime page tables */
@@ -492,6 +494,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
#endif
memset(boot_second, 0x0, PAGE_SIZE);
clean_and_invalidate_xen_dcache(boot_second);
+ memset(boot_third, 0x0, PAGE_SIZE);
+ clean_and_invalidate_xen_dcache(boot_third);
/* Break up the Xen mapping into 4k pages and protect them separately. */
for ( i = 0; i < LPAE_ENTRIES; i++ )
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 113be5a..739038a 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -396,7 +396,7 @@ static inline int gva_to_ipa(vaddr_t va, paddr_t *paddr)
*/
#define LPAE_SHIFT 9
-#define LPAE_ENTRIES (1u << LPAE_SHIFT)
+#define LPAE_ENTRIES (_AC(1,U) << LPAE_SHIFT)
#define LPAE_ENTRY_MASK (LPAE_ENTRIES - 1)
#define THIRD_SHIFT (PAGE_SHIFT)
--
1.7.10.4
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH v2 3/4] xen: arm: Do not use level 0 section mappings in boot page tables.
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
2014-07-18 16:12 ` [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
@ 2014-07-18 16:12 ` Ian Campbell
2014-07-20 20:42 ` Julien Grall
2014-07-18 16:12 ` [PATCH v2 4/4] xen: arm: avoid unnecessary additional " Ian Campbell
2 siblings, 1 reply; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:12 UTC (permalink / raw)
To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini
Level 0 does not support superpage mappings, meaning that on systems where Xen
is loaded above 512GB (I'm not aware of any such systems) the 1:1 mapping in
the boot page tables is invalid.
In order to avoid this issue we need an additional first level page table
mapped by the appropriate L0 slot and containing a 1:1 superpage mapping.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Fixed stray hard tab
---
xen/arch/arm/arm64/head.S | 21 ++++++++++++++++++---
xen/arch/arm/mm.c | 3 +++
2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 5d7b2b5..6ba9d94 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -267,13 +267,28 @@ skip_bss:
orr x2, x1, x3 /* + rights for linear PT */
str x2, [x4, #0] /* Map it in slot 0 */
- /* ... map of paddr(start) in boot_pgtable */
+ /* ... map of paddr(start) in boot_pgtable+boot_first_id */
lsr x1, x19, #ZEROETH_SHIFT/* Offset of base paddr in boot_pgtable */
cbz x1, 1f /* It's in slot 0, map in boot_first
* or boot_second later on */
- lsl x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
- mov x3, #PT_MEM /* x2 := Section mapping */
+ /* Level zero does not support superpage mappings, so we have
+ * to use an extra first level page in which we create a 1GB mapping.
+ */
+ ldr x2, =boot_first_id
+ add x2, x2, x20 /* x2 := paddr (boot_first_id) */
+
+ mov x3, #PT_PT /* x2 := table map of boot_first_id */
+ orr x2, x2, x3 /* + rights for linear PT */
+ lsl x1, x1, #3 /* x1 := Slot offset */
+ str x2, [x4, x1]
+
+ ldr x4, =boot_first_id /* Next level into boot_first_id */
+ add x4, x4, x20 /* x4 := paddr(boot_first_id) */
+
+ lsr x1, x19, #FIRST_SHIFT /* x1 := Offset of base paddr in boot_first_id */
+ lsl x2, x1, #FIRST_SHIFT /* x2 := Base address for 1GB mapping */
+ mov x3, #PT_MEM /* x2 := Section map */
orr x2, x2, x3
and x1, x1, #0x1ff /* x1 := Slot offset */
lsl x1, x1, #3
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index fdc7c98..0a243b0 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -68,6 +68,7 @@ struct domain *dom_xen, *dom_io, *dom_cow;
lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
#ifdef CONFIG_ARM_64
lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
+lpae_t boot_first_id[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
#endif
lpae_t boot_second[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
@@ -491,6 +492,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
#ifdef CONFIG_ARM_64
memset(boot_first, 0x0, PAGE_SIZE);
clean_and_invalidate_xen_dcache(boot_first);
+ memset(boot_first_id, 0x0, PAGE_SIZE);
+ clean_and_invalidate_xen_dcache(boot_first_id);
#endif
memset(boot_second, 0x0, PAGE_SIZE);
clean_and_invalidate_xen_dcache(boot_second);
--
1.7.10.4
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH v2 4/4] xen: arm: avoid unnecessary additional mappings in boot page tables.
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
2014-07-18 16:12 ` [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
2014-07-18 16:12 ` [PATCH v2 3/4] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
@ 2014-07-18 16:12 ` Ian Campbell
2014-07-20 21:27 ` Julien Grall
2 siblings, 1 reply; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:12 UTC (permalink / raw)
To: xen-devel; +Cc: julien.grall, tim, Ian Campbell, stefano.stabellini
If the identity map is created at one level then avoid creating
entries further down the boot page tables, since these will be aliases at
strange virtual addresses.
For example consider an arm32 system (for simplicity) with Xen loaded at
address 0x40402000. As a virtual address this corresponds to walking offsets 1,
2 and 2 at the first, second and third levels respectively.
When creating the identity map we will therefore create a 1GB super mapping at
0x40000000 for the identity map, which is the one we want to use.
However when considering the second level we will see the offset 2 and create a
2MB mapping in slot 2 of boot_second. Since boot_second is mapped in slot 0 of
boot_first this corresponds to an unwanted mapping from virtual address
0x00400000 to physical address 0x40400000.
We still do not handle the case where the load address is within the 2MB range
starting just after XEN_VIRT_START. This is not a regression but this patch
tries to provide a more useful diagnostic message. We do handle loading at
exactly XEN_VIRT_START.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v2: Expanded on commit message.
Handle/accept being loaded at exactly 2M
---
xen/arch/arm/arm32/head.S | 20 +++++++++++++++++---
xen/arch/arm/arm64/head.S | 19 ++++++++++++++++---
2 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 0a76c2e..caf7934 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -45,7 +45,7 @@
* r3 -
* r4 -
* r5 -
- * r6 -
+ * r6 - identity map in place
* r7 - CPUID
* r8 - DTB address (boot CPU only)
* r9 - paddr(start)
@@ -250,6 +250,14 @@ cpu_init_done:
* mapping. So each CPU must rebuild the page tables here with
* the 1:1 in place. */
+ /* If Xen is loaded at exactly XEN_VIRT_START then we don't
+ * need an additional 1:1 mapping, the virtual mapping will
+ * suffice.
+ */
+ cmp r9, #XEN_VIRT_START
+ moveq r6, #1 /* r6 := identity map now in place */
+ movne r6, #0 /* r6 := identity map not yet in place */
+
/* Write Xen's PT's paddr into the HTTBR */
ldr r4, =boot_pgtable
add r4, r4, r10 /* r4 := paddr (boot_pagetable) */
@@ -275,6 +283,7 @@ cpu_init_done:
orr r2, r2, #PT_LOWER(MEM)
lsl r1, r1, #3 /* r1 := Slot offset */
strd r2, r3, [r4, r1] /* Mapping of paddr(start) */
+ mov r6, #1 /* r6 := identity map now in place */
1: /* Setup boot_second: */
ldr r4, =boot_second
@@ -290,6 +299,8 @@ cpu_init_done:
strd r2, r3, [r4, #8] /* Map it in slot 1 */
/* ... map of paddr(start) in boot_second */
+ cmp r6, #1 /* r6 is set if already created */
+ beq 1f
lsr r2, r9, #SECOND_SHIFT /* Offset of base paddr in boot_second */
mov r3, #0x0ff /* r2 := LPAE entries mask */
orr r3, r3, #0x100
@@ -303,6 +314,7 @@ cpu_init_done:
mov r3, #0x0
lsl r1, r1, #3 /* r1 := Slot offset */
strd r2, r3, [r4, r1] /* Mapping of paddr(start) */
+ mov r6, #1 /* r6 := identity map now in place */
/* Setup boot_third: */
1: ldr r4, =boot_third
@@ -327,8 +339,10 @@ cpu_init_done:
/* boot pagetable setup complete */
- b 1f
-
+ cmp r6, #1 /* Did we manage to create an identity mapping ? */
+ beq 1f
+ PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
+ b fail
virtphys_clash:
/* Identity map clashes with boot_third, which we cannot handle yet */
PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 6ba9d94..f28b74a 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -62,7 +62,7 @@
* x22 - is_secondary_cpu
* x23 - UART address
* x24 - cpuid
- * x25 -
+ * x25 - identity map in place
* x26 -
* x27 -
* x28 -
@@ -253,6 +253,13 @@ skip_bss:
* mapping. So each CPU must rebuild the page tables here with
* the 1:1 in place. */
+ /* If Xen is loaded at exactly XEN_VIRT_START then we don't
+ * need an additional 1:1 mapping, the virtual mapping will
+ * suffice.
+ */
+ cmp x19, #XEN_VIRT_START
+ cset x25, eq /* x25 := identity map in place, or not */
+
/* Write Xen's PT's paddr into TTBR0_EL2 */
ldr x4, =boot_pgtable
add x4, x4, x20 /* x4 := paddr (boot_pagetable) */
@@ -293,6 +300,7 @@ skip_bss:
and x1, x1, #0x1ff /* x1 := Slot offset */
lsl x1, x1, #3
str x2, [x4, x1] /* Mapping of paddr(start) */
+ mov x25, #1 /* x25 := identity map now in place */
1: /* Setup boot_first: */
ldr x4, =boot_first /* Next level into boot_first */
@@ -306,6 +314,7 @@ skip_bss:
str x2, [x4, #0] /* Map it in slot 0 */
/* ... map of paddr(start) in boot_first */
+ cbnz x25, 1f /* x25 is set if already created */
lsr x2, x19, #FIRST_SHIFT /* x2 := Offset of base paddr in boot_first */
and x1, x2, #0x1ff /* x1 := Slot to use */
cbz x1, 1f /* It's in slot 0, map in boot_second */
@@ -315,6 +324,7 @@ skip_bss:
orr x2, x2, x3
lsl x1, x1, #3 /* x1 := Slot offset */
str x2, [x4, x1] /* Create mapping of paddr(start)*/
+ mov x25, #1 /* x25 := identity map now in place */
1: /* Setup boot_second: */
ldr x4, =boot_second /* Next level into boot_second */
@@ -328,6 +338,7 @@ skip_bss:
str x2, [x4, #8] /* Map it in slot 1 */
/* ... map of paddr(start) in boot_second */
+ cbnz x25, 1f /* x25 is set if already created */
lsr x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
and x1, x2, 0x1ff /* x1 := Slot to use */
cmp x1, #1
@@ -338,6 +349,7 @@ skip_bss:
orr x2, x2, x3
lsl x1, x1, #3 /* x1 := Slot offset */
str x2, [x4, x1] /* Create mapping of paddr(start)*/
+ mov x25, #1 /* x25 := identity map now in place */
1: /* Setup boot_third: */
ldr x4, =boot_third
@@ -361,8 +373,9 @@ skip_bss:
/* boot pagetable setup complete */
- b 1f
-
+ cbnz x25, 1f /* Did we manage to create an identity mapping ? */
+ PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
+ b fail
virtphys_clash:
/* Identity map clashes with boot_third, which we cannot handle yet */
PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
--
1.7.10.4
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling
2014-07-18 16:11 [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
@ 2014-07-18 16:16 ` Ian Campbell
2014-07-18 16:30 ` Ian Campbell
2014-07-18 16:57 ` [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size Ian Campbell
2 siblings, 1 reply; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:16 UTC (permalink / raw)
To: xen-devel; +Cc: Tim Deegan, Julien Grall, Stefano Stabellini
On Fri, 2014-07-18 at 17:11 +0100, Ian Campbell wrote:
> Since v2 I've made it correctly handle being loaded at exactly 2M (so
> virt == phys on boot) and made a few cleanups.
>
> I've also tested with various offsets from the base of RAM:
> 0x00200000 (2M)
> 0x40000000 (1G)
> 0x40200000 (1G+2M)
> 0x80000000 (2G)
> 0x80400000 (2G+4M)
> 0x80401000 (2G+4M+2K)
>
> On Midway (RAM starts at 0) and XGene (RAM starts at 256G). I think this
> covers the interesting cases at each level except the zeroeth where I
> have no hardware to test anything other than mappings in slot 0. I also
> couldn't test virt==phys on 64 bit since I've no such systems with RAM
> at 0.
I just realised that I forgot to add the check that head.S fits in 4K.
I'll do that in a patch 5/4 shortly.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling
2014-07-18 16:16 ` [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
@ 2014-07-18 16:30 ` Ian Campbell
2014-07-18 16:36 ` Andrew Cooper
0 siblings, 1 reply; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:30 UTC (permalink / raw)
To: xen-devel; +Cc: Tim Deegan, Julien Grall, Stefano Stabellini
On Fri, 2014-07-18 at 17:16 +0100, Ian Campbell wrote:
> On Fri, 2014-07-18 at 17:11 +0100, Ian Campbell wrote:
> > Since v2 I've made it correctly handle being loaded at exactly 2M (so
> > virt == phys on boot) and made a few cleanups.
> >
> > I've also tested with various offsets from the base of RAM:
> > 0x00200000 (2M)
> > 0x40000000 (1G)
> > 0x40200000 (1G+2M)
> > 0x80000000 (2G)
> > 0x80400000 (2G+4M)
> > 0x80401000 (2G+4M+2K)
> >
> > On Midway (RAM starts at 0) and XGene (RAM starts at 256G). I think this
> > covers the interesting cases at each level except the zeroeth where I
> > have no hardware to test anything other than mappings in slot 0. I also
> > couldn't test virt==phys on 64 bit since I've no such systems with RAM
> > at 0.
>
> I just realised that I forgot to add the check that head.S fits in 4K.
> I'll do that in a patch 5/4 shortly.
On second thoughts... This requires some sort of compile or link time
error/assertion in either gnu as or ld based on the offset of a symbol
or size of a section etc. I can't find anything in either manual which
would allow this to be constructed.
So I'm going to punt on this. Right now arm64/head.o is 1824 bytes and
arm32/head.o is 1920 bytes, so neither are even half way to blowing this
limit (which is an upper bound since the real limit is the offset of the
symbol "paging", at 1116 and 1196 bytes respectively).
If someone can come up with a suitable construct then we can revisit
this.
Ian.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling
2014-07-18 16:30 ` Ian Campbell
@ 2014-07-18 16:36 ` Andrew Cooper
2014-07-18 16:43 ` Ian Campbell
0 siblings, 1 reply; 18+ messages in thread
From: Andrew Cooper @ 2014-07-18 16:36 UTC (permalink / raw)
To: Ian Campbell, xen-devel; +Cc: Stefano Stabellini, Julien Grall, Tim Deegan
On 18/07/14 17:30, Ian Campbell wrote:
> On Fri, 2014-07-18 at 17:16 +0100, Ian Campbell wrote:
>> On Fri, 2014-07-18 at 17:11 +0100, Ian Campbell wrote:
>>> Since v2 I've made it correctly handle being loaded at exactly 2M (so
>>> virt == phys on boot) and made a few cleanups.
>>>
>>> I've also tested with various offsets from the base of RAM:
>>> 0x00200000 (2M)
>>> 0x40000000 (1G)
>>> 0x40200000 (1G+2M)
>>> 0x80000000 (2G)
>>> 0x80400000 (2G+4M)
>>> 0x80401000 (2G+4M+2K)
>>>
>>> On Midway (RAM starts at 0) and XGene (RAM starts at 256G). I think this
>>> covers the interesting cases at each level except the zeroeth where I
>>> have no hardware to test anything other than mappings in slot 0. I also
>>> couldn't test virt==phys on 64 bit since I've no such systems with RAM
>>> at 0.
>> I just realised that I forgot to add the check that head.S fits in 4K.
>> I'll do that in a patch 5/4 shortly.
> On second thoughts... This requires some sort of compile or link time
> error/assertion in either gnu as or ld based on the offset of a symbol
> or size of a section etc. I can't find anything in either manual which
> would allow this to be constructed
>
> So I'm going to punt on this. Right now aem64/head.o is 1824 bytes and
> arm32/head.o is 1920 bytes, so neither are even half way to blowing this
> limit (which is an upper bound since the real limit is the offset of the
> symbol "paging", at 1116 and 1196 bytes respectively).
>
> If someone can come up with a suitable construct then we can revisit
> this.
>
> Ian.
If I understand your problem correctly, it is exactly the same as we do
for the kexec relocation page in x86.
See the bottom of xen/arch/x86/xen.lds.S
~Andrew
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling
2014-07-18 16:36 ` Andrew Cooper
@ 2014-07-18 16:43 ` Ian Campbell
0 siblings, 0 replies; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:43 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Stefano Stabellini, Julien Grall, Tim Deegan, xen-devel
On Fri, 2014-07-18 at 17:36 +0100, Andrew Cooper wrote:
> On 18/07/14 17:30, Ian Campbell wrote:
> > On Fri, 2014-07-18 at 17:16 +0100, Ian Campbell wrote:
> >> On Fri, 2014-07-18 at 17:11 +0100, Ian Campbell wrote:
> >>> Since v2 I've made it correctly handle being loaded at exactly 2M (so
> >>> virt == phys on boot) and made a few cleanups.
> >>>
> >>> I've also tested with various offsets from the base of RAM:
> >>> 0x00200000 (2M)
> >>> 0x40000000 (1G)
> >>> 0x40200000 (1G+2M)
> >>> 0x80000000 (2G)
> >>> 0x80400000 (2G+4M)
> >>> 0x80401000 (2G+4M+2K)
> >>>
> >>> On Midway (RAM starts at 0) and XGene (RAM starts at 256G). I think this
> >>> covers the interesting cases at each level except the zeroeth where I
> >>> have no hardware to test anything other than mappings in slot 0. I also
> >>> couldn't test virt==phys on 64 bit since I've no such systems with RAM
> >>> at 0.
> >> I just realised that I forgot to add the check that head.S fits in 4K.
> >> I'll do that in a patch 5/4 shortly.
> > On second thoughts... This requires some sort of compile or link time
> > error/assertion in either gnu as or ld based on the offset of a symbol
> > or size of a section etc. I can't find anything in either manual which
> > would allow this to be constructed
> >
> > So I'm going to punt on this. Right now aem64/head.o is 1824 bytes and
> > arm32/head.o is 1920 bytes, so neither are even half way to blowing this
> > limit (which is an upper bound since the real limit is the offset of the
> > symbol "paging", at 1116 and 1196 bytes respectively).
> >
> > If someone can come up with a suitable construct then we can revisit
> > this.
> >
> > Ian.
>
> If I understand your problem correctly, it is exactly the same as we do
> for the kexec relocation page in x86.
>
> See the bottom of xen/arch/x86/xen.lds.S
That's the badger!
Thanks. It's in "Other Linker Script Commands" in the docs rather than
somewhere I was capable of finding it without knowing the name...
Ian.
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size
2014-07-18 16:11 [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
2014-07-18 16:16 ` [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
@ 2014-07-18 16:57 ` Ian Campbell
2014-07-18 17:06 ` Andrew Cooper
2014-07-20 21:39 ` Julien Grall
2 siblings, 2 replies; 18+ messages in thread
From: Ian Campbell @ 2014-07-18 16:57 UTC (permalink / raw)
To: xen-devel; +Cc: Tim Deegan, Julien Grall, Stefano Stabellini
This avoids having to deal with the 1:1 boot mapping crossing a
section or page boundary.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
xen/arch/arm/arm32/head.S | 3 +++
xen/arch/arm/arm64/head.S | 3 +++
xen/arch/arm/xen.lds.S | 6 ++++++
3 files changed, 12 insertions(+)
diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index caf7934..b4c3973 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -458,6 +458,9 @@ fail: PRINT("- Boot failed -\r\n")
1: wfe
b 1b
+.globl _end_boot
+_end_boot:
+
/* Copy Xen to new location and switch TTBR
* r1:r0 ttbr
* r2 source address
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index f28b74a..a52e2e4 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -487,6 +487,9 @@ fail: PRINT("- Boot failed -\r\n")
1: wfe
b 1b
+.globl _end_boot
+_end_boot:
+
/* Copy Xen to new location and switch TTBR
* x0 ttbr
* x1 source address
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index be55dad..079e085 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -178,3 +178,9 @@ SECTIONS
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
}
+
+/*
+ * We require that Xen is loaded at a 4K boundary, so this ensures that any
+ * code running on the boot time identity map cannot cross a section boundary.
+ */
+ASSERT( _end_boot - start <= PAGE_SIZE, "Boot code is larger than 4K")
--
1.7.10.4
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size
2014-07-18 16:57 ` [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size Ian Campbell
@ 2014-07-18 17:06 ` Andrew Cooper
2014-07-21 10:39 ` Ian Campbell
2014-07-20 21:39 ` Julien Grall
1 sibling, 1 reply; 18+ messages in thread
From: Andrew Cooper @ 2014-07-18 17:06 UTC (permalink / raw)
To: Ian Campbell, xen-devel; +Cc: Stefano Stabellini, Julien Grall, Tim Deegan
On 18/07/14 17:57, Ian Campbell wrote:
> This avoids having to deal with the 1:1 boot mapping crossing a
> section or page boundary.
>
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> ---
> xen/arch/arm/arm32/head.S | 3 +++
> xen/arch/arm/arm64/head.S | 3 +++
> xen/arch/arm/xen.lds.S | 6 ++++++
> 3 files changed, 12 insertions(+)
>
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index caf7934..b4c3973 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -458,6 +458,9 @@ fail: PRINT("- Boot failed -\r\n")
> 1: wfe
> b 1b
>
> +.globl _end_boot
> +_end_boot:
> +
GLOBAL() should be in scope (although git grep shows that some use of
.global has slipped back into the arm .S files)
~Andrew
> /* Copy Xen to new location and switch TTBR
> * r1:r0 ttbr
> * r2 source address
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index f28b74a..a52e2e4 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -487,6 +487,9 @@ fail: PRINT("- Boot failed -\r\n")
> 1: wfe
> b 1b
>
> +.globl _end_boot
> +_end_boot:
> +
> /* Copy Xen to new location and switch TTBR
> * x0 ttbr
> * x1 source address
> diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
> index be55dad..079e085 100644
> --- a/xen/arch/arm/xen.lds.S
> +++ b/xen/arch/arm/xen.lds.S
> @@ -178,3 +178,9 @@ SECTIONS
> .stab.indexstr 0 : { *(.stab.indexstr) }
> .comment 0 : { *(.comment) }
> }
> +
> +/*
> + * We require that Xen is loaded at a 4K boundary, so this ensures that any
> + * code running on the boot time identity map cannot cross a section boundary.
> + */
> +ASSERT( _end_boot - start <= PAGE_SIZE, "Boot code is larger than 4K")
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address.
2014-07-18 16:12 ` [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
@ 2014-07-20 20:28 ` Julien Grall
2014-07-21 12:00 ` Ian Campbell
0 siblings, 1 reply; 18+ messages in thread
From: Julien Grall @ 2014-07-20 20:28 UTC (permalink / raw)
To: Ian Campbell, xen-devel; +Cc: tim, stefano.stabellini
Hi Ian,
On 18/07/14 17:12, Ian Campbell wrote:
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 51501dc..0a76c2e 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -26,6 +26,7 @@
>
> #define PT_PT 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
> #define PT_MEM 0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
> +#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
> #define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
> #define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
>
> @@ -258,11 +259,11 @@ cpu_init_done:
> /* Setup boot_pgtable: */
> ldr r1, =boot_second
> add r1, r1, r10 /* r1 := paddr (boot_second) */
> - mov r3, #0x0
>
> /* ... map boot_second in boot_pgtable[0] */
> orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of boot_second */
> orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
> + mov r3, #0x0
> strd r2, r3, [r4, #0] /* Map it in slot 0 */
>
> /* ... map of paddr(start) in boot_pgtable */
> @@ -279,31 +280,61 @@ cpu_init_done:
> ldr r4, =boot_second
> add r4, r4, r10 /* r4 := paddr (boot_second) */
>
> - lsr r2, r9, #SECOND_SHIFT /* Base address for 2MB mapping */
> - lsl r2, r2, #SECOND_SHIFT
> + ldr r1, =boot_third
> + add r1, r1, r10 /* r1 := paddr (boot_third) */
> +
> + /* ... map boot_third in boot_second[1] */
> + orr r2, r1, #PT_UPPER(PT) /* r2:r3 := table map of boot_third */
> + orr r2, r2, #PT_LOWER(PT) /* (+ rights for linear PT) */
> + mov r3, #0x0
> + strd r2, r3, [r4, #8] /* Map it in slot 1 */
> +
> + /* ... map of paddr(start) in boot_second */
> + lsr r2, r9, #SECOND_SHIFT /* Offset of base paddr in boot_second */
> + mov r3, #0x0ff /* r2 := LPAE entries mask */
The register in the comment looks wrong to me.
> + orr r3, r3, #0x100
It took me several minutes to understand why the orr... I think the 2
previous assembly lines could be replaced by a single one:
ldr r3, =LPAE_ENTRY_MASK
[..]
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index d46481b..5d7b2b5 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -27,6 +27,7 @@
>
> #define PT_PT 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
> #define PT_MEM 0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
> +#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
> #define PT_DEV 0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
> #define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
>
> @@ -274,8 +275,9 @@ skip_bss:
> lsl x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
> mov x3, #PT_MEM /* x2 := Section mapping */
> orr x2, x2, x3
> - lsl x1, x1, #3 /* x1 := Slot offset */
> - str x2, [x4, x1] /* Mapping of paddr(start)*/
> + and x1, x1, #0x1ff /* x1 := Slot offset */
The #0x1ff could be replaced by #LPAE_ENTRY_MASK
> + lsl x1, x1, #3
> + str x2, [x4, x1] /* Mapping of paddr(start) */
>
> 1: /* Setup boot_first: */
> ldr x4, =boot_first /* Next level into boot_first */
> @@ -290,7 +292,7 @@ skip_bss:
>
> /* ... map of paddr(start) in boot_first */
> lsr x2, x19, #FIRST_SHIFT /* x2 := Offset of base paddr in boot_first */
> - and x1, x2, 0x1ff /* x1 := Slot to use */
> + and x1, x2, #0x1ff /* x1 := Slot to use */
Same here.
OOI, you only added the #. Was there any issue before?
> cbz x1, 1f /* It's in slot 0, map in boot_second */
>
> lsl x2, x2, #FIRST_SHIFT /* Base address for 1GB mapping */
> @@ -303,31 +305,55 @@ skip_bss:
> ldr x4, =boot_second /* Next level into boot_second */
> add x4, x4, x20 /* x4 := paddr(boot_second) */
>
> - lsr x2, x19, #SECOND_SHIFT /* Base address for 2MB mapping */
> - lsl x2, x2, #SECOND_SHIFT
> + /* ... map boot_third in boot_second[1] */
> + ldr x1, =boot_third
> + add x1, x1, x20 /* x1 := paddr(boot_third) */
> + mov x3, #PT_PT /* x2 := table map of boot_third */
> + orr x2, x1, x3 /* + rights for linear PT */
> + str x2, [x4, #8] /* Map it in slot 1 */
> +
> + /* ... map of paddr(start) in boot_second */
> + lsr x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
> + and x1, x2, 0x1ff /* x1 := Slot to use */
A bit strange that a few lines above you changed a similar line to add #,
and here you forgot it.
I would also replace the 0x1ff by LPAE_ENTRY_MASK.
Regards,
--
Julien Grall
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 3/4] xen: arm: Do not use level 0 section mappings in boot page tables.
2014-07-18 16:12 ` [PATCH v2 3/4] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
@ 2014-07-20 20:42 ` Julien Grall
0 siblings, 0 replies; 18+ messages in thread
From: Julien Grall @ 2014-07-20 20:42 UTC (permalink / raw)
To: Ian Campbell, xen-devel; +Cc: tim, stefano.stabellini
Hi Ian,
On 18/07/14 17:12, Ian Campbell wrote:
> Level 0 does not support superpage mappings, meaning that systems on where Xen
> is loaded above 512GB (I'm not aware of any such systems) the 1:1 mapping on
> the boot page tables is invalid.
>
> In order to avoid this issue we need an additional first level page table
> mapped by the appropriate L0 slot and containing a 1:1 superpage mapping.
>
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
Regards,
> ---
> v2: Fixed stray hard tab
> ---
> xen/arch/arm/arm64/head.S | 21 ++++++++++++++++++---
> xen/arch/arm/mm.c | 3 +++
> 2 files changed, 21 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index 5d7b2b5..6ba9d94 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -267,13 +267,28 @@ skip_bss:
> orr x2, x1, x3 /* + rights for linear PT */
> str x2, [x4, #0] /* Map it in slot 0 */
>
> - /* ... map of paddr(start) in boot_pgtable */
> + /* ... map of paddr(start) in boot_pgtable+boot_first_id */
> lsr x1, x19, #ZEROETH_SHIFT/* Offset of base paddr in boot_pgtable */
> cbz x1, 1f /* It's in slot 0, map in boot_first
> * or boot_second later on */
>
> - lsl x2, x1, #ZEROETH_SHIFT /* Base address for 512GB mapping */
> - mov x3, #PT_MEM /* x2 := Section mapping */
> + /* Level zero does not support superpage mappings, so we have
> + * to use an extra first level page in which we create a 1GB mapping.
> + */
> + ldr x2, =boot_first_id
> + add x2, x2, x20 /* x2 := paddr (boot_first_id) */
> +
> + mov x3, #PT_PT /* x2 := table map of boot_first_id */
> + orr x2, x2, x3 /* + rights for linear PT */
> + lsl x1, x1, #3 /* x1 := Slot offset */
> + str x2, [x4, x1]
> +
> + ldr x4, =boot_first_id /* Next level into boot_first_id */
> + add x4, x4, x20 /* x4 := paddr(boot_first_id) */
> +
> + lsr x1, x19, #FIRST_SHIFT /* x1 := Offset of base paddr in boot_first_id */
> + lsl x2, x1, #FIRST_SHIFT /* x2 := Base address for 1GB mapping */
> + mov x3, #PT_MEM /* x2 := Section map */
> orr x2, x2, x3
> and x1, x1, #0x1ff /* x1 := Slot offset */
> lsl x1, x1, #3
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index fdc7c98..0a243b0 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -68,6 +68,7 @@ struct domain *dom_xen, *dom_io, *dom_cow;
> lpae_t boot_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> #ifdef CONFIG_ARM_64
> lpae_t boot_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> +lpae_t boot_first_id[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> #endif
> lpae_t boot_second[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
> @@ -491,6 +492,8 @@ void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
> #ifdef CONFIG_ARM_64
> memset(boot_first, 0x0, PAGE_SIZE);
> clean_and_invalidate_xen_dcache(boot_first);
> + memset(boot_first_id, 0x0, PAGE_SIZE);
> + clean_and_invalidate_xen_dcache(boot_first_id);
> #endif
> memset(boot_second, 0x0, PAGE_SIZE);
> clean_and_invalidate_xen_dcache(boot_second);
>
--
Julien Grall
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 4/4] xen: arm: avoid unnecessary additional mappings in boot page tables.
2014-07-18 16:12 ` [PATCH v2 4/4] xen: arm: avoid unnecessary additional " Ian Campbell
@ 2014-07-20 21:27 ` Julien Grall
0 siblings, 0 replies; 18+ messages in thread
From: Julien Grall @ 2014-07-20 21:27 UTC (permalink / raw)
To: Ian Campbell, xen-devel; +Cc: tim, stefano.stabellini
Hi Ian,
On 18/07/14 17:12, Ian Campbell wrote:
> If the identity map is created at one level then avoid creating
> entries further down the boot page tables, since these will be aliases at
> strange virtual address.
>
> For example consider an arm32 system (for simplicity) with Xen loaded at
> address 0x40402000. As a virtual address this corresponds to walking offsets 1,
> 2 and 2 at the first, second and third levels respectively.
>
> When creating the identity map we will therefore create a 1GB super mapping at
> 0x40000000 for the identity map, which is the one we want to use.
>
> However when considering the second level we will see the offset 2 and create a
> 2MB mapping in slot 2 of boot_second. Since boot_second is mapped in slot 0 of
> boot_first this corresponds to an unwanted mapping from virtual address
> 0x00400000 to physical address 0x40400000.
>
> We still do not handle the case where the load address is within the 2MB range
> starting just after XEN_VIRT_START. This is not a regression but this patch
> tries to provide a more useful diagnostic message. We do handle loading at
> exactly XEN_VIRT_START.
>
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> ---
> v2: Expanded on commit message.
> Handle/accept being loaded at exactly 2M
Thanks for adding the explanation in the commit message. I understand
better the purpose of this patch.
Acked-by: Julien Grall <julien.grall@linaro.org>
Regards,
> ---
> xen/arch/arm/arm32/head.S | 20 +++++++++++++++++---
> xen/arch/arm/arm64/head.S | 19 ++++++++++++++++---
> 2 files changed, 33 insertions(+), 6 deletions(-)
>
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 0a76c2e..caf7934 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -45,7 +45,7 @@
> * r3 -
> * r4 -
> * r5 -
> - * r6 -
> + * r6 - identity map in place
> * r7 - CPUID
> * r8 - DTB address (boot CPU only)
> * r9 - paddr(start)
> @@ -250,6 +250,14 @@ cpu_init_done:
> * mapping. So each CPU must rebuild the page tables here with
> * the 1:1 in place. */
>
> + /* If Xen is loaded at exactly XEN_VIRT_START then we don't
> + * need an additional 1:1 mapping, the virtual mapping will
> + * suffice.
> + */
> + cmp r9, #XEN_VIRT_START
> + moveq r6, #1 /* r6 := identity map now in place */
> + movne r6, #0 /* r6 := identity map not yet in place */
> +
> /* Write Xen's PT's paddr into the HTTBR */
> ldr r4, =boot_pgtable
> add r4, r4, r10 /* r4 := paddr (boot_pagetable) */
> @@ -275,6 +283,7 @@ cpu_init_done:
> orr r2, r2, #PT_LOWER(MEM)
> lsl r1, r1, #3 /* r1 := Slot offset */
> strd r2, r3, [r4, r1] /* Mapping of paddr(start) */
> + mov r6, #1 /* r6 := identity map now in place */
>
> 1: /* Setup boot_second: */
> ldr r4, =boot_second
> @@ -290,6 +299,8 @@ cpu_init_done:
> strd r2, r3, [r4, #8] /* Map it in slot 1 */
>
> /* ... map of paddr(start) in boot_second */
> + cmp r6, #1 /* r6 is set if already created */
> + beq 1f
> lsr r2, r9, #SECOND_SHIFT /* Offset of base paddr in boot_second */
> mov r3, #0x0ff /* r2 := LPAE entries mask */
> orr r3, r3, #0x100
> @@ -303,6 +314,7 @@ cpu_init_done:
> mov r3, #0x0
> lsl r1, r1, #3 /* r1 := Slot offset */
> strd r2, r3, [r4, r1] /* Mapping of paddr(start) */
> + mov r6, #1 /* r6 := identity map now in place */
>
> /* Setup boot_third: */
> 1: ldr r4, =boot_third
> @@ -327,8 +339,10 @@ cpu_init_done:
>
> /* boot pagetable setup complete */
>
> - b 1f
> -
> + cmp r6, #1 /* Did we manage to create an identity mapping ? */
> + beq 1f
> + PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
> + b fail
> virtphys_clash:
> /* Identity map clashes with boot_third, which we cannot handle yet */
> PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index 6ba9d94..f28b74a 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -62,7 +62,7 @@
> * x22 - is_secondary_cpu
> * x23 - UART address
> * x24 - cpuid
> - * x25 -
> + * x25 - identity map in place
> * x26 -
> * x27 -
> * x28 -
> @@ -253,6 +253,13 @@ skip_bss:
> * mapping. So each CPU must rebuild the page tables here with
> * the 1:1 in place. */
>
> + /* If Xen is loaded at exactly XEN_VIRT_START then we don't
> + * need an additional 1:1 mapping, the virtual mapping will
> + * suffice.
> + */
> + cmp x19, #XEN_VIRT_START
> + cset x25, eq /* x25 := identity map in place, or not */
> +
> /* Write Xen's PT's paddr into TTBR0_EL2 */
> ldr x4, =boot_pgtable
> add x4, x4, x20 /* x4 := paddr (boot_pagetable) */
> @@ -293,6 +300,7 @@ skip_bss:
> and x1, x1, #0x1ff /* x1 := Slot offset */
> lsl x1, x1, #3
> str x2, [x4, x1] /* Mapping of paddr(start) */
> + mov x25, #1 /* x25 := identity map now in place */
>
> 1: /* Setup boot_first: */
> ldr x4, =boot_first /* Next level into boot_first */
> @@ -306,6 +314,7 @@ skip_bss:
> str x2, [x4, #0] /* Map it in slot 0 */
>
> /* ... map of paddr(start) in boot_first */
> + cbnz x25, 1f /* x25 is set if already created */
> lsr x2, x19, #FIRST_SHIFT /* x2 := Offset of base paddr in boot_first */
> and x1, x2, #0x1ff /* x1 := Slot to use */
> cbz x1, 1f /* It's in slot 0, map in boot_second */
> @@ -315,6 +324,7 @@ skip_bss:
> orr x2, x2, x3
> lsl x1, x1, #3 /* x1 := Slot offset */
> str x2, [x4, x1] /* Create mapping of paddr(start)*/
> + mov x25, #1 /* x25 := identity map now in place */
>
> 1: /* Setup boot_second: */
> ldr x4, =boot_second /* Next level into boot_second */
> @@ -328,6 +338,7 @@ skip_bss:
> str x2, [x4, #8] /* Map it in slot 1 */
>
> /* ... map of paddr(start) in boot_second */
> + cbnz x25, 1f /* x25 is set if already created */
> lsr x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
> and x1, x2, 0x1ff /* x1 := Slot to use */
> cmp x1, #1
> @@ -338,6 +349,7 @@ skip_bss:
> orr x2, x2, x3
> lsl x1, x1, #3 /* x1 := Slot offset */
> str x2, [x4, x1] /* Create mapping of paddr(start)*/
> + mov x25, #1 /* x25 := identity map now in place */
>
> 1: /* Setup boot_third: */
> ldr x4, =boot_third
> @@ -361,8 +373,9 @@ skip_bss:
>
> /* boot pagetable setup complete */
>
> - b 1f
> -
> + cbnz x25, 1f /* Did we manage to create an identity mapping ? */
> + PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
> + b fail
> virtphys_clash:
> /* Identity map clashes with boot_third, which we cannot handle yet */
> PRINT("Unable to build boot page tables - virt and phys addresses clash.\r\n")
>
--
Julien Grall
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size
2014-07-18 16:57 ` [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size Ian Campbell
2014-07-18 17:06 ` Andrew Cooper
@ 2014-07-20 21:39 ` Julien Grall
1 sibling, 0 replies; 18+ messages in thread
From: Julien Grall @ 2014-07-20 21:39 UTC (permalink / raw)
To: Ian Campbell, xen-devel; +Cc: Tim Deegan, Stefano Stabellini
Hi Ian,
On 18/07/14 17:57, Ian Campbell wrote:
> This avoids having to deal with the 1:1 boot mapping crossing a
> section or page boundary.
Regardless Andrew's comment:
Acked-by: Julien Grall <julien.grall@linaro.org>
Regards,
> Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> ---
> xen/arch/arm/arm32/head.S | 3 +++
> xen/arch/arm/arm64/head.S | 3 +++
> xen/arch/arm/xen.lds.S | 6 ++++++
> 3 files changed, 12 insertions(+)
>
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index caf7934..b4c3973 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -458,6 +458,9 @@ fail: PRINT("- Boot failed -\r\n")
> 1: wfe
> b 1b
>
> +.globl _end_boot
> +_end_boot:
> +
> /* Copy Xen to new location and switch TTBR
> * r1:r0 ttbr
> * r2 source address
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index f28b74a..a52e2e4 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -487,6 +487,9 @@ fail: PRINT("- Boot failed -\r\n")
> 1: wfe
> b 1b
>
> +.globl _end_boot
> +_end_boot:
> +
> /* Copy Xen to new location and switch TTBR
> * x0 ttbr
> * x1 source address
> diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
> index be55dad..079e085 100644
> --- a/xen/arch/arm/xen.lds.S
> +++ b/xen/arch/arm/xen.lds.S
> @@ -178,3 +178,9 @@ SECTIONS
> .stab.indexstr 0 : { *(.stab.indexstr) }
> .comment 0 : { *(.comment) }
> }
> +
> +/*
> + * We require that Xen is loaded at a 4K boundary, so this ensures that any
> + * code running on the boot time identity map cannot cross a section boundary.
> + */
> +ASSERT( _end_boot - start <= PAGE_SIZE, "Boot code is larger than 4K")
>
--
Julien Grall
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size
2014-07-18 17:06 ` Andrew Cooper
@ 2014-07-21 10:39 ` Ian Campbell
2014-07-21 11:21 ` Ian Campbell
0 siblings, 1 reply; 18+ messages in thread
From: Ian Campbell @ 2014-07-21 10:39 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Stefano Stabellini, Julien Grall, Tim Deegan, xen-devel
On Fri, 2014-07-18 at 18:06 +0100, Andrew Cooper wrote:
> On 18/07/14 17:57, Ian Campbell wrote:
> > This avoids having to deal with the 1:1 boot mapping crossing a
> > section or page boundary.
> >
> > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > ---
> > xen/arch/arm/arm32/head.S | 3 +++
> > xen/arch/arm/arm64/head.S | 3 +++
> > xen/arch/arm/xen.lds.S | 6 ++++++
> > 3 files changed, 12 insertions(+)
> >
> > diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> > index caf7934..b4c3973 100644
> > --- a/xen/arch/arm/arm32/head.S
> > +++ b/xen/arch/arm/arm32/head.S
> > @@ -458,6 +458,9 @@ fail: PRINT("- Boot failed -\r\n")
> > 1: wfe
> > b 1b
> >
> > +.globl _end_boot
> > +_end_boot:
> > +
>
> GLOBAL() should be in scope (although git grep shows that some use of
> .global has slipped back into the arm .S files)
Thanks, I only knew about ENTRY which didn't seem appropriate. I don't
think this warrants a resend so I'll do it on commit unless someone
objects to that.
Ian.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size
2014-07-21 10:39 ` Ian Campbell
@ 2014-07-21 11:21 ` Ian Campbell
0 siblings, 0 replies; 18+ messages in thread
From: Ian Campbell @ 2014-07-21 11:21 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Tim Deegan, Julien Grall, Stefano Stabellini, xen-devel
On Mon, 2014-07-21 at 11:39 +0100, Ian Campbell wrote:
> On Fri, 2014-07-18 at 18:06 +0100, Andrew Cooper wrote:
> > On 18/07/14 17:57, Ian Campbell wrote:
> > > This avoids having to deal with the 1:1 boot mapping crossing a
> > > section or page boundary.
> > >
> > > Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
> > > ---
> > > xen/arch/arm/arm32/head.S | 3 +++
> > > xen/arch/arm/arm64/head.S | 3 +++
> > > xen/arch/arm/xen.lds.S | 6 ++++++
> > > 3 files changed, 12 insertions(+)
> > >
> > > diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> > > index caf7934..b4c3973 100644
> > > --- a/xen/arch/arm/arm32/head.S
> > > +++ b/xen/arch/arm/arm32/head.S
> > > @@ -458,6 +458,9 @@ fail: PRINT("- Boot failed -\r\n")
> > > 1: wfe
> > > b 1b
> > >
> > > +.globl _end_boot
> > > +_end_boot:
> > > +
> >
> > GLOBAL() should be in scope (although git grep shows that some use of
> > .global has slipped back into the arm .S files)
>
> Thanks, I only knew about ENTRY which didn't seem appropriate. I don't
> think this warrants a resend so I'll do it on commit unless someone
> objects to that.
I hadn't spotted Julien's comments on #2, so there will need to be a
respin anyway, I'll fix this for that.
Ian.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address.
2014-07-20 20:28 ` Julien Grall
@ 2014-07-21 12:00 ` Ian Campbell
0 siblings, 0 replies; 18+ messages in thread
From: Ian Campbell @ 2014-07-21 12:00 UTC (permalink / raw)
To: Julien Grall; +Cc: stefano.stabellini, tim, xen-devel
On Sun, 2014-07-20 at 21:28 +0100, Julien Grall wrote:
> > + mov r3, #0x0ff /* r2 := LPAE entries mask */
>
> The register in the comment looks wrong to me.
Indeed, well spotted.
>
>
> > + orr r3, r3, #0x100
>
> I took me several minutes to understand why the orr... I think the 2
> previous assembly lines could be replaced by a single one:
> ldr r3, =LPAE_ENTRY_MASK
Right. I'll do that.
> > + lsl x1, x1, #3
> > + str x2, [x4, x1] /* Mapping of paddr(start) */
> >
> > 1: /* Setup boot_first: */
> > ldr x4, =boot_first /* Next level into boot_first */
> > @@ -290,7 +292,7 @@ skip_bss:
> >
> > /* ... map of paddr(start) in boot_first */
> > lsr x2, x19, #FIRST_SHIFT /* x2 := Offset of base paddr in boot_first */
> > - and x1, x2, 0x1ff /* x1 := Slot to use */
> > + and x1, x2, #0x1ff /* x1 := Slot to use */
>
> Same here.
>
> OOI, you only added the #. Was there any issue before?
It assembles to the same thing; it's just incorrect (or at least
non-preferred) syntax.
> > + and x1, x2, 0x1ff /* x1 := Slot to use */
>
> A bit strange that few lines above you changed a similar lines to add #,
> and here you forgot it.
Nothing strange, just an oversight.
> I would also replace the 0x1ff by LPAE_ENTRY_MASK.
Ack.
Ian.
^ permalink raw reply [flat|nested] 18+ messages in thread
end of thread, other threads:[~2014-07-21 12:00 UTC | newest]
Thread overview: 18+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-07-18 16:11 [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
2014-07-18 16:12 ` [PATCH v2 1/4] xen: arm: correct whitespace/comments and use #defines in head.S Ian Campbell
2014-07-18 16:12 ` [PATCH v2 2/4] xen: arm: Handle 4K aligned hypervisor load address Ian Campbell
2014-07-20 20:28 ` Julien Grall
2014-07-21 12:00 ` Ian Campbell
2014-07-18 16:12 ` [PATCH v2 3/4] xen: arm: Do not use level 0 section mappings in boot page tables Ian Campbell
2014-07-20 20:42 ` Julien Grall
2014-07-18 16:12 ` [PATCH v2 4/4] xen: arm: avoid unnecessary additional " Ian Campbell
2014-07-20 21:27 ` Julien Grall
2014-07-18 16:16 ` [PATCH v2 0/4] xen: arm: various improvements to boot time page table handling Ian Campbell
2014-07-18 16:30 ` Ian Campbell
2014-07-18 16:36 ` Andrew Cooper
2014-07-18 16:43 ` Ian Campbell
2014-07-18 16:57 ` [PATCH v2 5/4] xen: arm: ensure that the boot code is <4K in size Ian Campbell
2014-07-18 17:06 ` Andrew Cooper
2014-07-21 10:39 ` Ian Campbell
2014-07-21 11:21 ` Ian Campbell
2014-07-20 21:39 ` Julien Grall
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).