* [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y
@ 2017-02-14 10:08 Andrey Ryabinin
2017-02-14 10:08 ` [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region Andrey Ryabinin
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Andrey Ryabinin @ 2017-02-14 10:08 UTC (permalink / raw)
To: x86, Thomas Gleixner, Ingo Molnar, H. Peter Anvin
Cc: kasan-dev, Alexander Potapenko, Dmitry Vyukov, linux-kernel,
Mark Rutland, Tobias Regnery, Andrey Ryabinin
Enabling both DEBUG_WX=y and KASAN=y options significantly increases
boot time (dozens of seconds at least).
KASAN fills kernel page tables with repeated values to map several
TBs of the virtual memory to the single kasan_zero_page:
kasan_zero_pud ->
kasan_zero_pmd->
kasan_zero_pte->
kasan_zero_page
So, the page table walker used to find W+X mappings checks the same
kasan_zero_p?d page table entries a lot more than once.
With this patch, the pud walker will skip a pud if it has the same value as
the previous one. Skipping is done only when we search for W+X mappings,
so this optimization won't affect the page table dump via debugfs.
This dropped the time spent in the W+X check from ~30 sec to a reasonable 0.1 sec:
Before:
[ 4.579991] Freeing unused kernel memory: 1000K
[ 35.257523] x86/mm: Checked W+X mappings: passed, no W+X pages found.
After:
[ 5.138756] Freeing unused kernel memory: 1000K
[ 5.266496] x86/mm: Checked W+X mappings: passed, no W+X pages found.
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
arch/x86/mm/dump_pagetables.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 8aa6bea..0813534 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -327,18 +327,31 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
#if PTRS_PER_PUD > 1
+/*
+ * This is an optimization for CONFIG_DEBUG_WX=y + CONFIG_KASAN=y
+ * KASAN fills page tables with the same values. Since there is no
+ * point in checking page table more than once we just skip repeated
+ * entries. This saves us dozens of seconds during boot.
+ */
+static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
+{
+ return checkwx && prev_pud && (pud_val(*prev_pud) == pud_val(*pud));
+}
+
static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
unsigned long P)
{
int i;
pud_t *start;
pgprotval_t prot;
+ pud_t *prev_pud = NULL;
start = (pud_t *) pgd_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PUD; i++) {
st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
- if (!pud_none(*start)) {
+ if (!pud_none(*start) &&
+ !pud_already_checked(prev_pud, start, st->check_wx)) {
if (pud_large(*start) || !pud_present(*start)) {
prot = pud_flags(*start);
note_page(m, st, __pgprot(prot), 2);
@@ -349,6 +362,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
} else
note_page(m, st, __pgprot(0), 2);
+ prev_pud = start;
start++;
}
}
--
2.10.2
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region
2017-02-14 10:08 [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Andrey Ryabinin
@ 2017-02-14 10:08 ` Andrey Ryabinin
2017-02-14 12:18 ` Alexander Potapenko
2017-02-16 18:59 ` [tip:x86/mm] " tip-bot for Andrey Ryabinin
2017-02-14 12:21 ` [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Alexander Potapenko
2017-02-16 18:58 ` [tip:x86/mm] " tip-bot for Andrey Ryabinin
2 siblings, 2 replies; 6+ messages in thread
From: Andrey Ryabinin @ 2017-02-14 10:08 UTC (permalink / raw)
To: x86, Thomas Gleixner, Ingo Molnar, H. Peter Anvin
Cc: kasan-dev, Alexander Potapenko, Dmitry Vyukov, linux-kernel,
Mark Rutland, Tobias Regnery, Andrey Ryabinin
Annotate the KASAN shadow with address markers in page table
dump output:
$ cat /sys/kernel/debug/kernel_page_tables
...
---[ Vmemmap ]---
0xffffea0000000000-0xffffea0003000000 48M RW PSE GLB NX pmd
0xffffea0003000000-0xffffea0004000000 16M pmd
0xffffea0004000000-0xffffea0005000000 16M RW PSE GLB NX pmd
0xffffea0005000000-0xffffea0040000000 944M pmd
0xffffea0040000000-0xffffea8000000000 511G pud
0xffffea8000000000-0xffffec0000000000 1536G pgd
---[ KASAN shadow ]---
0xffffec0000000000-0xffffed0000000000 1T ro GLB NX pte
0xffffed0000000000-0xffffed0018000000 384M RW PSE GLB NX pmd
0xffffed0018000000-0xffffed0020000000 128M pmd
0xffffed0020000000-0xffffed0028200000 130M RW PSE GLB NX pmd
0xffffed0028200000-0xffffed0040000000 382M pmd
0xffffed0040000000-0xffffed8000000000 511G pud
0xffffed8000000000-0xfffff50000000000 7680G pgd
0xfffff50000000000-0xfffffbfff0000000 7339776M ro GLB NX pte
0xfffffbfff0000000-0xfffffbfff0200000 2M pmd
0xfffffbfff0200000-0xfffffbfff0a00000 8M RW PSE GLB NX pmd
0xfffffbfff0a00000-0xfffffbffffe00000 244M pmd
0xfffffbffffe00000-0xfffffc0000000000 2M ro GLB NX pte
---[ KASAN shadow end ]---
0xfffffc0000000000-0xffffff0000000000 3T pgd
---[ ESPfix Area ]---
...
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
arch/x86/mm/dump_pagetables.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0813534..58b5bee 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <asm/kasan.h>
#include <asm/pgtable.h>
/*
@@ -51,6 +52,10 @@ enum address_markers_idx {
LOW_KERNEL_NR,
VMALLOC_START_NR,
VMEMMAP_START_NR,
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+#endif
# ifdef CONFIG_X86_ESPFIX64
ESPFIX_START_NR,
# endif
@@ -76,6 +81,10 @@ static struct addr_marker address_markers[] = {
{ 0/* PAGE_OFFSET */, "Low Kernel Mapping" },
{ 0/* VMALLOC_START */, "vmalloc() Area" },
{ 0/* VMEMMAP_START */, "Vmemmap" },
+#ifdef CONFIG_KASAN
+ { KASAN_SHADOW_START, "KASAN shadow" },
+ { KASAN_SHADOW_END, "KASAN shadow end" },
+#endif
# ifdef CONFIG_X86_ESPFIX64
{ ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
# endif
--
2.10.2
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region
2017-02-14 10:08 ` [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region Andrey Ryabinin
@ 2017-02-14 12:18 ` Alexander Potapenko
2017-02-16 18:59 ` [tip:x86/mm] " tip-bot for Andrey Ryabinin
1 sibling, 0 replies; 6+ messages in thread
From: Alexander Potapenko @ 2017-02-14 12:18 UTC (permalink / raw)
To: Andrey Ryabinin
Cc: x86, Thomas Gleixner, Ingo Molnar, H. Peter Anvin, kasan-dev,
Dmitry Vyukov, LKML, Mark Rutland, Tobias Regnery
On Tue, Feb 14, 2017 at 11:08 AM, Andrey Ryabinin
<aryabinin@virtuozzo.com> wrote:
> Annotate the KASAN shadow with address markers in page table
> dump output:
>
> $ cat /sys/kernel/debug/kernel_page_tables
> ...
> ---[ Vmemmap ]---
> 0xffffea0000000000-0xffffea0003000000 48M RW PSE GLB NX pmd
> 0xffffea0003000000-0xffffea0004000000 16M pmd
> 0xffffea0004000000-0xffffea0005000000 16M RW PSE GLB NX pmd
> 0xffffea0005000000-0xffffea0040000000 944M pmd
> 0xffffea0040000000-0xffffea8000000000 511G pud
> 0xffffea8000000000-0xffffec0000000000 1536G pgd
> ---[ KASAN shadow ]---
> 0xffffec0000000000-0xffffed0000000000 1T ro GLB NX pte
> 0xffffed0000000000-0xffffed0018000000 384M RW PSE GLB NX pmd
> 0xffffed0018000000-0xffffed0020000000 128M pmd
> 0xffffed0020000000-0xffffed0028200000 130M RW PSE GLB NX pmd
> 0xffffed0028200000-0xffffed0040000000 382M pmd
> 0xffffed0040000000-0xffffed8000000000 511G pud
> 0xffffed8000000000-0xfffff50000000000 7680G pgd
> 0xfffff50000000000-0xfffffbfff0000000 7339776M ro GLB NX pte
> 0xfffffbfff0000000-0xfffffbfff0200000 2M pmd
> 0xfffffbfff0200000-0xfffffbfff0a00000 8M RW PSE GLB NX pmd
> 0xfffffbfff0a00000-0xfffffbffffe00000 244M pmd
> 0xfffffbffffe00000-0xfffffc0000000000 2M ro GLB NX pte
> ---[ KASAN shadow end ]---
> 0xfffffc0000000000-0xffffff0000000000 3T pgd
> ---[ ESPfix Area ]---
> ...
>
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
> ---
> arch/x86/mm/dump_pagetables.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
> index 0813534..58b5bee 100644
> --- a/arch/x86/mm/dump_pagetables.c
> +++ b/arch/x86/mm/dump_pagetables.c
> @@ -18,6 +18,7 @@
> #include <linux/sched.h>
> #include <linux/seq_file.h>
>
> +#include <asm/kasan.h>
> #include <asm/pgtable.h>
>
> /*
> @@ -51,6 +52,10 @@ enum address_markers_idx {
> LOW_KERNEL_NR,
> VMALLOC_START_NR,
> VMEMMAP_START_NR,
> +#ifdef CONFIG_KASAN
> + KASAN_SHADOW_START_NR,
> + KASAN_SHADOW_END_NR,
> +#endif
> # ifdef CONFIG_X86_ESPFIX64
> ESPFIX_START_NR,
> # endif
> @@ -76,6 +81,10 @@ static struct addr_marker address_markers[] = {
> { 0/* PAGE_OFFSET */, "Low Kernel Mapping" },
> { 0/* VMALLOC_START */, "vmalloc() Area" },
> { 0/* VMEMMAP_START */, "Vmemmap" },
> +#ifdef CONFIG_KASAN
> + { KASAN_SHADOW_START, "KASAN shadow" },
> + { KASAN_SHADOW_END, "KASAN shadow end" },
> +#endif
> # ifdef CONFIG_X86_ESPFIX64
> { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
> # endif
> --
> 2.10.2
>
--
Alexander Potapenko
Software Engineer
Google Germany GmbH
Erika-Mann-Straße, 33
80636 München
Geschäftsführer: Matthew Scott Sucherman, Paul Terence Manicle
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y
2017-02-14 10:08 [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Andrey Ryabinin
2017-02-14 10:08 ` [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region Andrey Ryabinin
@ 2017-02-14 12:21 ` Alexander Potapenko
2017-02-16 18:58 ` [tip:x86/mm] " tip-bot for Andrey Ryabinin
2 siblings, 0 replies; 6+ messages in thread
From: Alexander Potapenko @ 2017-02-14 12:21 UTC (permalink / raw)
To: Andrey Ryabinin
Cc: x86, Thomas Gleixner, Ingo Molnar, H. Peter Anvin, kasan-dev,
Dmitry Vyukov, LKML, Mark Rutland, Tobias Regnery
On Tue, Feb 14, 2017 at 11:08 AM, Andrey Ryabinin
<aryabinin@virtuozzo.com> wrote:
> Enabling both DEBUG_WX=y and KASAN=y options significantly increases
> boot time (dozens of seconds at least).
> KASAN fills kernel page tables with repeated values to map several
> TBs of the virtual memory to the single kasan_zero_page:
>
> kasan_zero_pud ->
> kasan_zero_pmd->
> kasan_zero_pte->
> kasan_zero_page
>
> So, the page table walker used to find W+X mappings checks the same
> kasan_zero_p?d page table entries a lot more than once.
> With this patch, the pud walker will skip a pud if it has the same value as
> the previous one. Skipping is done only when we search for W+X mappings,
> so this optimization won't affect the page table dump via debugfs.
>
> This dropped the time spent in the W+X check from ~30 sec to a reasonable 0.1 sec:
>
> Before:
> [ 4.579991] Freeing unused kernel memory: 1000K
> [ 35.257523] x86/mm: Checked W+X mappings: passed, no W+X pages found.
>
> After:
> [ 5.138756] Freeing unused kernel memory: 1000K
> [ 5.266496] x86/mm: Checked W+X mappings: passed, no W+X pages found.
>
> Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
> ---
> arch/x86/mm/dump_pagetables.c | 16 +++++++++++++++-
> 1 file changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
> index 8aa6bea..0813534 100644
> --- a/arch/x86/mm/dump_pagetables.c
> +++ b/arch/x86/mm/dump_pagetables.c
> @@ -327,18 +327,31 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
>
> #if PTRS_PER_PUD > 1
>
> +/*
> + * This is an optimization for CONFIG_DEBUG_WX=y + CONFIG_KASAN=y
> + * KASAN fills page tables with the same values. Since there is no
> + * point in checking page table more than once we just skip repeated
> + * entries. This saves us dozens of seconds during boot.
> + */
> +static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
> +{
> + return checkwx && prev_pud && (pud_val(*prev_pud) == pud_val(*pud));
> +}
> +
> static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
> unsigned long P)
> {
> int i;
> pud_t *start;
> pgprotval_t prot;
> + pud_t *prev_pud = NULL;
>
> start = (pud_t *) pgd_page_vaddr(addr);
>
> for (i = 0; i < PTRS_PER_PUD; i++) {
> st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
> - if (!pud_none(*start)) {
> + if (!pud_none(*start) &&
> + !pud_already_checked(prev_pud, start, st->check_wx)) {
> if (pud_large(*start) || !pud_present(*start)) {
> prot = pud_flags(*start);
> note_page(m, st, __pgprot(prot), 2);
> @@ -349,6 +362,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
> } else
> note_page(m, st, __pgprot(0), 2);
>
> + prev_pud = start;
> start++;
> }
> }
> --
> 2.10.2
>
--
Alexander Potapenko
Software Engineer
Google Germany GmbH
Erika-Mann-Straße, 33
80636 München
Geschäftsführer: Matthew Scott Sucherman, Paul Terence Manicle
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg
^ permalink raw reply [flat|nested] 6+ messages in thread
* [tip:x86/mm] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y
2017-02-14 10:08 [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Andrey Ryabinin
2017-02-14 10:08 ` [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region Andrey Ryabinin
2017-02-14 12:21 ` [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Alexander Potapenko
@ 2017-02-16 18:58 ` tip-bot for Andrey Ryabinin
2 siblings, 0 replies; 6+ messages in thread
From: tip-bot for Andrey Ryabinin @ 2017-02-16 18:58 UTC (permalink / raw)
To: linux-tip-commits
Cc: dvyukov, aryabinin, hpa, glider, tglx, linux-kernel,
tobias.regnery, mingo, mark.rutland
Commit-ID: 243b72aae28ca1032284028323bb81c9235b15c9
Gitweb: http://git.kernel.org/tip/243b72aae28ca1032284028323bb81c9235b15c9
Author: Andrey Ryabinin <aryabinin@virtuozzo.com>
AuthorDate: Tue, 14 Feb 2017 13:08:38 +0300
Committer: Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 16 Feb 2017 19:53:25 +0100
x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y
Enabling both DEBUG_WX=y and KASAN=y options significantly increases
boot time (dozens of seconds at least).
KASAN fills kernel page tables with repeated values to map several
TBs of the virtual memory to the single kasan_zero_page:
kasan_zero_pud ->
kasan_zero_pmd->
kasan_zero_pte->
kasan_zero_page
So, the page table walker used to find W+X mappings checks the same
kasan_zero_p?d page table entries a lot more than once.
With this patch, the pud walker will skip a pud if it has the same value as
the previous one. Skipping is done only when we search for W+X mappings,
so this optimization won't affect the page table dump via debugfs.
This dropped the time spent in the W+X check from ~30 sec to a reasonable 0.1 sec:
Before:
[ 4.579991] Freeing unused kernel memory: 1000K
[ 35.257523] x86/mm: Checked W+X mappings: passed, no W+X pages found.
After:
[ 5.138756] Freeing unused kernel memory: 1000K
[ 5.266496] x86/mm: Checked W+X mappings: passed, no W+X pages found.
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: kasan-dev@googlegroups.com
Cc: Tobias Regnery <tobias.regnery@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Link: http://lkml.kernel.org/r/20170214100839.17186-1-aryabinin@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/mm/dump_pagetables.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 8aa6bea..0813534 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -327,18 +327,31 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
#if PTRS_PER_PUD > 1
+/*
+ * This is an optimization for CONFIG_DEBUG_WX=y + CONFIG_KASAN=y
+ * KASAN fills page tables with the same values. Since there is no
+ * point in checking page table more than once we just skip repeated
+ * entries. This saves us dozens of seconds during boot.
+ */
+static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
+{
+ return checkwx && prev_pud && (pud_val(*prev_pud) == pud_val(*pud));
+}
+
static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
unsigned long P)
{
int i;
pud_t *start;
pgprotval_t prot;
+ pud_t *prev_pud = NULL;
start = (pud_t *) pgd_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PUD; i++) {
st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
- if (!pud_none(*start)) {
+ if (!pud_none(*start) &&
+ !pud_already_checked(prev_pud, start, st->check_wx)) {
if (pud_large(*start) || !pud_present(*start)) {
prot = pud_flags(*start);
note_page(m, st, __pgprot(prot), 2);
@@ -349,6 +362,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
} else
note_page(m, st, __pgprot(0), 2);
+ prev_pud = start;
start++;
}
}
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [tip:x86/mm] x86/mm/ptdump: Add address marker for KASAN shadow region
2017-02-14 10:08 ` [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region Andrey Ryabinin
2017-02-14 12:18 ` Alexander Potapenko
@ 2017-02-16 18:59 ` tip-bot for Andrey Ryabinin
1 sibling, 0 replies; 6+ messages in thread
From: tip-bot for Andrey Ryabinin @ 2017-02-16 18:59 UTC (permalink / raw)
To: linux-tip-commits
Cc: hpa, mark.rutland, mingo, glider, tglx, aryabinin, linux-kernel,
tobias.regnery, dvyukov
Commit-ID: 025205f8f30c6ab52b69bf34fb359ac80360fefd
Gitweb: http://git.kernel.org/tip/025205f8f30c6ab52b69bf34fb359ac80360fefd
Author: Andrey Ryabinin <aryabinin@virtuozzo.com>
AuthorDate: Tue, 14 Feb 2017 13:08:39 +0300
Committer: Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 16 Feb 2017 19:53:25 +0100
x86/mm/ptdump: Add address marker for KASAN shadow region
Annotate the KASAN shadow with address markers in page table
dump output:
$ cat /sys/kernel/debug/kernel_page_tables
...
---[ Vmemmap ]---
0xffffea0000000000-0xffffea0003000000 48M RW PSE GLB NX pmd
0xffffea0003000000-0xffffea0004000000 16M pmd
0xffffea0004000000-0xffffea0005000000 16M RW PSE GLB NX pmd
0xffffea0005000000-0xffffea0040000000 944M pmd
0xffffea0040000000-0xffffea8000000000 511G pud
0xffffea8000000000-0xffffec0000000000 1536G pgd
---[ KASAN shadow ]---
0xffffec0000000000-0xffffed0000000000 1T ro GLB NX pte
0xffffed0000000000-0xffffed0018000000 384M RW PSE GLB NX pmd
0xffffed0018000000-0xffffed0020000000 128M pmd
0xffffed0020000000-0xffffed0028200000 130M RW PSE GLB NX pmd
0xffffed0028200000-0xffffed0040000000 382M pmd
0xffffed0040000000-0xffffed8000000000 511G pud
0xffffed8000000000-0xfffff50000000000 7680G pgd
0xfffff50000000000-0xfffffbfff0000000 7339776M ro GLB NX pte
0xfffffbfff0000000-0xfffffbfff0200000 2M pmd
0xfffffbfff0200000-0xfffffbfff0a00000 8M RW PSE GLB NX pmd
0xfffffbfff0a00000-0xfffffbffffe00000 244M pmd
0xfffffbffffe00000-0xfffffc0000000000 2M ro GLB NX pte
---[ KASAN shadow end ]---
0xfffffc0000000000-0xffffff0000000000 3T pgd
---[ ESPfix Area ]---
...
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: kasan-dev@googlegroups.com
Cc: Tobias Regnery <tobias.regnery@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Link: http://lkml.kernel.org/r/20170214100839.17186-2-aryabinin@virtuozzo.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/mm/dump_pagetables.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0813534..58b5bee 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <asm/kasan.h>
#include <asm/pgtable.h>
/*
@@ -51,6 +52,10 @@ enum address_markers_idx {
LOW_KERNEL_NR,
VMALLOC_START_NR,
VMEMMAP_START_NR,
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+#endif
# ifdef CONFIG_X86_ESPFIX64
ESPFIX_START_NR,
# endif
@@ -76,6 +81,10 @@ static struct addr_marker address_markers[] = {
{ 0/* PAGE_OFFSET */, "Low Kernel Mapping" },
{ 0/* VMALLOC_START */, "vmalloc() Area" },
{ 0/* VMEMMAP_START */, "Vmemmap" },
+#ifdef CONFIG_KASAN
+ { KASAN_SHADOW_START, "KASAN shadow" },
+ { KASAN_SHADOW_END, "KASAN shadow end" },
+#endif
# ifdef CONFIG_X86_ESPFIX64
{ ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
# endif
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2017-02-16 18:59 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-02-14 10:08 [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Andrey Ryabinin
2017-02-14 10:08 ` [PATCH 2/2] x86/mm/ptdump: Add address marker for KASAN shadow region Andrey Ryabinin
2017-02-14 12:18 ` Alexander Potapenko
2017-02-16 18:59 ` [tip:x86/mm] " tip-bot for Andrey Ryabinin
2017-02-14 12:21 ` [PATCH 1/2] x86/mm/ptdump: Optimize check for W+X mappings for CONFIG_KASAN=y Alexander Potapenko
2017-02-16 18:58 ` [tip:x86/mm] " tip-bot for Andrey Ryabinin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox