From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Arnd Bergmann <arnd@arndb.de>,
"H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <ak@linux.intel.com>,
Dave Hansen <dave.hansen@intel.com>,
Andy Lutomirski <luto@amacapital.net>,
linux-arch@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv3 31/33] x86/mm: add support for 5-level paging for KASLR
Date: Fri, 17 Feb 2017 17:13:26 +0300
Message-ID: <20170217141328.164563-32-kirill.shutemov@linux.intel.com>
In-Reply-To: <20170217141328.164563-1-kirill.shutemov@linux.intel.com>
With 5-level paging, randomization happens at the P4D level instead of
the PUD level. The maximum amount of physical memory is also bumped to
52 bits for 5-level paging.
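As a quick sanity check of the new kaslr_regions[] maximum (a
back-of-the-envelope sketch using the standard shift values,
__PHYSICAL_MASK_SHIFT of 46 vs. 52 and TB_SHIFT of 40, so treat the
numbers as illustrative):

	1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT)
	4-level: 1 << (46 - 40) =   64	/* TB, the old hardcoded value */
	5-level: 1 << (52 - 40) = 4096	/* TB */

The randomization alignment likewise grows from PUD_SIZE (1 GB) to
P4D_SIZE (512 GB).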
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
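For reviewers, below is a minimal userspace sketch of the masking step
the patch changes. This is hypothetical standalone code, not part of the
patch; the shift values are assumed to be the standard x86-64 ones:

	#include <stdio.h>

	/* Standard x86-64 shifts; P4D only exists with 5-level paging. */
	#define PUD_SHIFT	30	/* a PUD entry covers 1 GB */
	#define P4D_SHIFT	39	/* a P4D entry covers 512 GB */
	#define PUD_MASK	(~((1UL << PUD_SHIFT) - 1))
	#define P4D_MASK	(~((1UL << P4D_SHIFT) - 1))

	int main(void)
	{
		/* Stand-in for prandom_bytes_state() output. */
		unsigned long rand = 0x123456789abcdefUL;
		/* Say this region has 1 TB of spare space to randomize in. */
		unsigned long entropy = 1UL << 40;

		/*
		 * Same expression as in kernel_randomize_memory(): pick a
		 * random offset within the available space, then round it
		 * down to the granularity the page-table level can express.
		 */
		printf("4-level offset: %#lx\n", (rand % (entropy + 1)) & PUD_MASK);
		printf("5-level offset: %#lx\n", (rand % (entropy + 1)) & P4D_MASK);
		return 0;
	}

With 5-level paging the offset stays P4D-aligned (512 GB units), which
is why both the masking and the round_up() step in the patch switch from
PUD_* to P4D_* when CONFIG_X86_5LEVEL is enabled.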
arch/x86/mm/kaslr.c | 82 ++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 63 insertions(+), 19 deletions(-)
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e57182716..662e5c4b21c8 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -6,12 +6,12 @@
*
* Entropy is generated using the KASLR early boot functions now shared in
* the lib directory (originally written by Kees Cook). Randomization is
- * done on PGD & PUD page table levels to increase possible addresses. The
- * physical memory mapping code was adapted to support PUD level virtual
- * addresses. This implementation on the best configuration provides 30,000
- * possible virtual addresses in average for each memory region. An additional
- * low memory page is used to ensure each CPU can start with a PGD aligned
- * virtual address (for realmode).
+ * done on PGD & P4D/PUD page table levels to increase possible addresses.
+ * The physical memory mapping code was adapted to support P4D/PUD level
+ * virtual addresses. This implementation on the best configuration provides
+ * 30,000 possible virtual addresses on average for each memory region.
+ * An additional low memory page is used to ensure each CPU can start with
+ * a PGD aligned virtual address (for realmode).
*
* The order of each memory region is not changed. The feature looks at
* the available space for the regions based on different configuration
@@ -70,7 +70,8 @@ static __initdata struct kaslr_memory_region {
unsigned long *base;
unsigned long size_tb;
} kaslr_regions[] = {
- { &page_offset_base, 64/* Maximum */ },
+ { &page_offset_base,
+ 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
{ &vmalloc_base, VMALLOC_SIZE_TB },
{ &vmemmap_base, 1 },
};
@@ -142,7 +143,10 @@ void __init kernel_randomize_memory(void)
*/
entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
prandom_bytes_state(&rand_state, &rand, sizeof(rand));
- entropy = (rand % (entropy + 1)) & PUD_MASK;
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ entropy = (rand % (entropy + 1)) & P4D_MASK;
+ else
+ entropy = (rand % (entropy + 1)) & PUD_MASK;
vaddr += entropy;
*kaslr_regions[i].base = vaddr;
@@ -151,27 +155,21 @@ void __init kernel_randomize_memory(void)
* randomization alignment.
*/
vaddr += get_padding(&kaslr_regions[i]);
- vaddr = round_up(vaddr + 1, PUD_SIZE);
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ vaddr = round_up(vaddr + 1, P4D_SIZE);
+ else
+ vaddr = round_up(vaddr + 1, PUD_SIZE);
remain_entropy -= entropy;
}
}
-/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
- */
-void __meminit init_trampoline(void)
+static void __meminit init_trampoline_pud(void)
{
unsigned long paddr, paddr_next;
pgd_t *pgd;
pud_t *pud_page, *pud_page_tramp;
int i;
- if (!kaslr_memory_enabled()) {
- init_trampoline_default();
- return;
- }
-
pud_page_tramp = alloc_low_page();
paddr = 0;
@@ -192,3 +190,49 @@ void __meminit init_trampoline(void)
set_pgd(&trampoline_pgd_entry,
__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
+
+static void __meminit init_trampoline_p4d(void)
+{
+ unsigned long paddr, paddr_next;
+ pgd_t *pgd;
+ p4d_t *p4d_page, *p4d_page_tramp;
+ int i;
+
+ p4d_page_tramp = alloc_low_page();
+
+ paddr = 0;
+ pgd = pgd_offset_k((unsigned long)__va(paddr));
+ p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
+
+ for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+ p4d_t *p4d, *p4d_tramp;
+ unsigned long vaddr = (unsigned long)__va(paddr);
+
+ p4d_tramp = p4d_page_tramp + p4d_index(paddr);
+ p4d = p4d_page + p4d_index(vaddr);
+ paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+ *p4d_tramp = *p4d;
+ }
+
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+
+ if (!kaslr_memory_enabled()) {
+ init_trampoline_default();
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ init_trampoline_p4d();
+ else
+ init_trampoline_pud();
+}
--
2.11.0