* [PATCH 1/5] x86_64: fix kasan shadow region page tables
[not found] <1435654466-8714-1-git-send-email-a.ryabinin@samsung.com>
@ 2015-06-30 8:54 ` Andrey Ryabinin
2015-06-30 8:54 ` [PATCH 2/5] x86_64: kasan: flush tlbs after switching cr3 Andrey Ryabinin
` (2 subsequent siblings)
3 siblings, 0 replies; 9+ messages in thread
From: Andrey Ryabinin @ 2015-06-30 8:54 UTC
To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86
Cc: Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, Andrey Ryabinin, stable
From: Alexander Popov <alpopov@ptsecurity.com>
Currently, the kasan shadow region page tables are created without taking
the physical offset (phys_base) into account. This causes a kernel halt
when phys_base is not zero.
So let's initialize the kasan shadow region page tables in
kasan_early_init() using __pa_nodebug(), which takes phys_base into account.
This patch also removes kasan low-level details from x86_64_start_kernel()
by moving kasan_map_early_shadow(init_level4_pgt) into kasan_early_init().
That requires moving clear_page(init_level4_pgt) before kasan_early_init().
Also remove the comment before clear_bss(), which no longer added much to
code readability; describing all the new ordering dependencies there would
be too verbose.
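To illustrate the bug (an editorial sketch, not part of the patch): on
x86_64, an address in the kernel high mapping translates to a physical
address roughly as below. The old head_64.S tables were filled at build
time with "sym - __START_KERNEL_map", which implicitly assumes
phys_base == 0, so the precomputed entries point at the wrong physical
pages once the kernel is loaded at a non-default physical address.

/*
 * Simplified sketch of __pa_nodebug()-style arithmetic for addresses
 * in the kernel image mapping; the real __phys_addr_nodebug() in
 * arch/x86/include/asm/page_64.h also handles the direct mapping.
 */
static inline unsigned long kernel_va_to_pa_sketch(unsigned long vaddr)
{
	/* phys_base is the runtime relocation offset of the kernel */
	return vaddr - __START_KERNEL_map + phys_base;
}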
Signed-off-by: Alexander Popov <alpopov@ptsecurity.com>
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: <stable@vger.kernel.org> # 4.0
---
arch/x86/include/asm/kasan.h | 8 ++------
arch/x86/kernel/head64.c | 10 ++++------
arch/x86/kernel/head_64.S | 29 -----------------------------
arch/x86/mm/kasan_init_64.c | 36 ++++++++++++++++++++++++++++++++++--
4 files changed, 40 insertions(+), 43 deletions(-)
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422..74a2a8d 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
#ifndef __ASSEMBLY__
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
#ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
void __init kasan_init(void);
#else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5a46681..f129a9a 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
/* Kill off the identity-map trampoline */
reset_early_page_tables();
- kasan_map_early_shadow(early_level4_pgt);
-
- /* clear bss before set_intr_gate with early_idt_handler */
clear_bss();
+ clear_page(init_level4_pgt);
+
+ kasan_early_init();
+
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
*/
load_ucode_bsp();
- clear_page(init_level4_pgt);
/* set init_level4_pgt kernel high mapping*/
init_level4_pgt[511] = early_level4_pgt[511];
- kasan_map_early_shadow(init_level4_pgt);
-
x86_64_start_reservations(real_mode_data);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index df7e780..7e5da2c 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@ ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT) \
- .rept (COUNT) ; \
- .quad (VAL) ; \
- .endr
-
-NEXT_PAGE(kasan_zero_pte)
- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
- .skip PAGE_SIZE
-#endif
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906..0e4a05f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -11,7 +11,19 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages; stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed, but not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
static int __init map_range(struct range *range)
{
@@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start,
pgd_clear(pgd_offset_k(start));
}
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
{
int i;
unsigned long start = KASAN_SHADOW_START;
@@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = {
};
#endif
+void __init kasan_early_init(void)
+{
+ int i;
+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ kasan_zero_pte[i] = __pte(pte_val);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ kasan_zero_pmd[i] = __pmd(pmd_val);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ kasan_zero_pud[i] = __pud(pud_val);
+
+ kasan_map_early_shadow(early_level4_pgt);
+ kasan_map_early_shadow(init_level4_pgt);
+}
+
void __init kasan_init(void)
{
int i;
--
2.4.4
* [PATCH 2/5] x86_64: kasan: flush tlbs after switching cr3
[not found] <1435654466-8714-1-git-send-email-a.ryabinin@samsung.com>
2015-06-30 8:54 ` [PATCH 1/5] x86_64: fix kasan shadow region page tables Andrey Ryabinin
@ 2015-06-30 8:54 ` Andrey Ryabinin
2015-06-30 8:54 ` [PATCH 3/5] x86_64: kasan: fix boot crash on AMD processors Andrey Ryabinin
[not found] ` <1435654811-8915-1-git-send-email-a.ryabinin@samsung.com>
3 siblings, 0 replies; 9+ messages in thread
From: Andrey Ryabinin @ 2015-06-30 8:54 UTC
To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86
Cc: Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, Andrey Ryabinin, stable
load_cr3() doesn't flush global TLB entries if PGE is enabled.
This may cause tons of false-positive reports, spamming the
kernel to death.
To fix this, __flush_tlb_all() should be called explicitly
after cr3 is changed.
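For background (an editorial sketch, not part of the patch): TLB entries
created from PTEs with _PAGE_GLOBAL survive a CR3 write while CR4.PGE is
set, which is why reloading cr3 alone is not enough here. Roughly,
__flush_tlb_all() behaves like the sketch below; the real implementation
lives in arch/x86/include/asm/tlbflush.h.

/*
 * Simplified sketch: toggling CR4.PGE flushes the whole TLB,
 * global entries included; without PGE a CR3 reload suffices.
 */
static inline void flush_tlb_all_sketch(void)
{
	unsigned long cr4 = native_read_cr4();

	if (cr4 & X86_CR4_PGE) {
		native_write_cr4(cr4 & ~X86_CR4_PGE);	/* flush everything */
		native_write_cr4(cr4);			/* restore PGE */
	} else {
		native_write_cr3(native_read_cr3());	/* non-global flush */
	}
}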
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: <stable@vger.kernel.org> # 4.0
---
arch/x86/mm/kasan_init_64.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0e4a05f..5d26642 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -208,6 +208,7 @@ void __init kasan_init(void)
memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
load_cr3(early_level4_pgt);
+ __flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -234,5 +235,6 @@ void __init kasan_init(void)
memset(kasan_zero_page, 0, PAGE_SIZE);
load_cr3(init_level4_pgt);
+ __flush_tlb_all();
init_task.kasan_depth = 0;
}
--
2.4.4
* [PATCH 3/5] x86_64: kasan: fix boot crash on AMD processors
[not found] <1435654466-8714-1-git-send-email-a.ryabinin@samsung.com>
2015-06-30 8:54 ` [PATCH 1/5] x86_64: fix kasan shadow region page tables Andrey Ryabinin
2015-06-30 8:54 ` [PATCH 2/5] x86_64: kasan: flush tlbs after switching cr3 Andrey Ryabinin
@ 2015-06-30 8:54 ` Andrey Ryabinin
[not found] ` <1435654811-8915-1-git-send-email-a.ryabinin@samsung.com>
3 siblings, 0 replies; 9+ messages in thread
From: Andrey Ryabinin @ 2015-06-30 8:54 UTC
To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86
Cc: Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, Andrey Ryabinin, stable
While populating the zero shadow, wrong bits were used in the upper-level
page table entries. __PAGE_KERNEL_RO, which was used for pgd/pud/pmd
entries, has _PAGE_BIT_GLOBAL set. The global bit is present only in the
lowest level of the page translation hierarchy (ptes), and it should be
zero in upper levels.
This bug doesn't seem to cause any trouble on Intel CPUs, while on AMD
CPUs it causes a kernel crash on boot.
Use the _KERNPG_TABLE bits for pgds/puds/pmds to fix this.
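For reference (an editorial sketch; the authoritative masks live in
arch/x86/include/asm/pgtable_types.h), the two protection masks differ
roughly as below, and the offending bit is _PAGE_GLOBAL:

/* Simplified bit layout of the relevant x86 page table flags */
#define SK_PRESENT	(1UL << 0)
#define SK_RW		(1UL << 1)
#define SK_ACCESSED	(1UL << 5)
#define SK_DIRTY	(1UL << 6)
#define SK_GLOBAL	(1UL << 8)	/* _PAGE_BIT_GLOBAL: leaf-only */
#define SK_NX		(1UL << 63)

/* Safe for non-leaf (pgd/pud/pmd) entries: no GLOBAL bit */
#define SK_KERNPG_TABLE	  (SK_PRESENT | SK_RW | SK_ACCESSED | SK_DIRTY)

/* Leaf protection: carries GLOBAL, which AMD CPUs treat as reserved
 * (and fault on) in upper-level entries, while Intel CPUs ignore it. */
#define SK_PAGE_KERNEL_RO (SK_PRESENT | SK_ACCESSED | SK_DIRTY | \
			   SK_GLOBAL | SK_NX)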
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Reported-by: Borislav Petkov <bp@alien8.de>
Cc: <stable@vger.kernel.org> # 4.0
---
arch/x86/mm/kasan_init_64.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 5d26642..9a54dbe 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -85,7 +85,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
WARN_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PMD_SIZE;
pmd = pmd_offset(pud, addr);
}
@@ -111,7 +111,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
WARN_ON(!pud_none(*pud));
set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PUD_SIZE;
pud = pud_offset(pgd, addr);
}
@@ -136,7 +136,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
WARN_ON(!pgd_none(*pgd));
set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PGDIR_SIZE;
pgd = pgd_offset_k(addr);
}
--
2.4.4
* [PATCH 1/5] x86_64: fix kasan shadow region page tables
[not found] ` <1435654811-8915-1-git-send-email-a.ryabinin@samsung.com>
@ 2015-06-30 9:00 ` Andrey Ryabinin
2015-07-01 9:07 ` Ingo Molnar
2015-06-30 9:00 ` [PATCH 2/5] x86_64: kasan: flush tlbs after switching cr3 Andrey Ryabinin
2015-06-30 9:00 ` [PATCH 3/5] x86_64: kasan: fix boot crash on AMD processors Andrey Ryabinin
2 siblings, 1 reply; 9+ messages in thread
From: Andrey Ryabinin @ 2015-06-30 9:00 UTC
To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86
Cc: Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, linux-kernel, Andrey Ryabinin,
stable
From: Alexander Popov <alpopov@ptsecurity.com>
Currently, the kasan shadow region page tables are created without taking
the physical offset (phys_base) into account. This causes a kernel halt
when phys_base is not zero.
So let's initialize the kasan shadow region page tables in
kasan_early_init() using __pa_nodebug(), which takes phys_base into account.
This patch also removes kasan low-level details from x86_64_start_kernel()
by moving kasan_map_early_shadow(init_level4_pgt) into kasan_early_init().
That requires moving clear_page(init_level4_pgt) before kasan_early_init().
Also remove the comment before clear_bss(), which no longer added much to
code readability; describing all the new ordering dependencies there would
be too verbose.
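As general background on what these tables back (an editorial sketch, not
part of the patch): KASAN maps every 8 bytes of kernel address space to
one shadow byte, and kasan_early_init() backs the entire shadow region
with a single page so that early shadow accesses never fault. Roughly:

/*
 * Simplified sketch of KASAN's address-to-shadow translation; the
 * real KASAN_SHADOW_OFFSET is architecture/config dependent, so it
 * is taken as a parameter here.
 */
static inline void *mem_to_shadow_sketch(const void *addr,
					 unsigned long shadow_offset)
{
	/* one shadow byte per 8 bytes of memory */
	return (void *)(((unsigned long)addr >> 3) + shadow_offset);
}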
Signed-off-by: Alexander Popov <alpopov@ptsecurity.com>
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: <stable@vger.kernel.org> # 4.0
---
arch/x86/include/asm/kasan.h | 8 ++------
arch/x86/kernel/head64.c | 10 ++++------
arch/x86/kernel/head_64.S | 29 -----------------------------
arch/x86/mm/kasan_init_64.c | 36 ++++++++++++++++++++++++++++++++++--
4 files changed, 40 insertions(+), 43 deletions(-)
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422..74a2a8d 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
#ifndef __ASSEMBLY__
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
#ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
void __init kasan_init(void);
#else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5a46681..f129a9a 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
/* Kill off the identity-map trampoline */
reset_early_page_tables();
- kasan_map_early_shadow(early_level4_pgt);
-
- /* clear bss before set_intr_gate with early_idt_handler */
clear_bss();
+ clear_page(init_level4_pgt);
+
+ kasan_early_init();
+
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
*/
load_ucode_bsp();
- clear_page(init_level4_pgt);
/* set init_level4_pgt kernel high mapping*/
init_level4_pgt[511] = early_level4_pgt[511];
- kasan_map_early_shadow(init_level4_pgt);
-
x86_64_start_reservations(real_mode_data);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index df7e780..7e5da2c 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@ ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT) \
- .rept (COUNT) ; \
- .quad (VAL) ; \
- .endr
-
-NEXT_PAGE(kasan_zero_pte)
- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
- .skip PAGE_SIZE
-#endif
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906..0e4a05f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -11,7 +11,19 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages; stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed, but not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
static int __init map_range(struct range *range)
{
@@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start,
pgd_clear(pgd_offset_k(start));
}
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
{
int i;
unsigned long start = KASAN_SHADOW_START;
@@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = {
};
#endif
+void __init kasan_early_init(void)
+{
+ int i;
+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ kasan_zero_pte[i] = __pte(pte_val);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ kasan_zero_pmd[i] = __pmd(pmd_val);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ kasan_zero_pud[i] = __pud(pud_val);
+
+ kasan_map_early_shadow(early_level4_pgt);
+ kasan_map_early_shadow(init_level4_pgt);
+}
+
void __init kasan_init(void)
{
int i;
--
2.4.4
* [PATCH 2/5] x86_64: kasan: flush tlbs after switching cr3
[not found] ` <1435654811-8915-1-git-send-email-a.ryabinin@samsung.com>
2015-06-30 9:00 ` [PATCH 1/5] x86_64: fix kasan shadow region page tables Andrey Ryabinin
@ 2015-06-30 9:00 ` Andrey Ryabinin
2015-06-30 9:00 ` [PATCH 3/5] x86_64: kasan: fix boot crash on AMD processors Andrey Ryabinin
2 siblings, 0 replies; 9+ messages in thread
From: Andrey Ryabinin @ 2015-06-30 9:00 UTC
To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86
Cc: Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, linux-kernel, Andrey Ryabinin,
stable
load_cr3() doesn't flush global TLB entries if PGE is enabled.
This may cause tons of false-positive reports, spamming the
kernel to death.
To fix this, __flush_tlb_all() should be called explicitly
after cr3 is changed.
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: <stable@vger.kernel.org> # 4.0
---
arch/x86/mm/kasan_init_64.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0e4a05f..5d26642 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -208,6 +208,7 @@ void __init kasan_init(void)
memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
load_cr3(early_level4_pgt);
+ __flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -234,5 +235,6 @@ void __init kasan_init(void)
memset(kasan_zero_page, 0, PAGE_SIZE);
load_cr3(init_level4_pgt);
+ __flush_tlb_all();
init_task.kasan_depth = 0;
}
--
2.4.4
* [PATCH 3/5] x86_64: kasan: fix boot crash on AMD processors
[not found] ` <1435654811-8915-1-git-send-email-a.ryabinin@samsung.com>
2015-06-30 9:00 ` [PATCH 1/5] x86_64: fix kasan shadow region page tables Andrey Ryabinin
2015-06-30 9:00 ` [PATCH 2/5] x86_64: kasan: flush tlbs after switching cr3 Andrey Ryabinin
@ 2015-06-30 9:00 ` Andrey Ryabinin
2 siblings, 0 replies; 9+ messages in thread
From: Andrey Ryabinin @ 2015-06-30 9:00 UTC
To: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86
Cc: Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, linux-kernel, Andrey Ryabinin,
stable
While populating the zero shadow, wrong bits were used in the upper-level
page table entries. __PAGE_KERNEL_RO, which was used for pgd/pud/pmd
entries, has _PAGE_BIT_GLOBAL set. The global bit is present only in the
lowest level of the page translation hierarchy (ptes), and it should be
zero in upper levels.
This bug doesn't seem to cause any trouble on Intel CPUs, while on AMD
CPUs it causes a kernel crash on boot.
Use the _KERNPG_TABLE bits for pgds/puds/pmds to fix this.
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Reported-by: Borislav Petkov <bp@alien8.de>
Cc: <stable@vger.kernel.org> # 4.0
---
arch/x86/mm/kasan_init_64.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 5d26642..9a54dbe 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -85,7 +85,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
WARN_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PMD_SIZE;
pmd = pmd_offset(pud, addr);
}
@@ -111,7 +111,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
WARN_ON(!pud_none(*pud));
set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PUD_SIZE;
pud = pud_offset(pgd, addr);
}
@@ -136,7 +136,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
WARN_ON(!pgd_none(*pgd));
set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PGDIR_SIZE;
pgd = pgd_offset_k(addr);
}
--
2.4.4
* Re: [PATCH 1/5] x86_64: fix kasan shadow region page tables
2015-06-30 9:00 ` [PATCH 1/5] x86_64: fix kasan shadow region page tables Andrey Ryabinin
@ 2015-07-01 9:07 ` Ingo Molnar
2015-07-01 12:13 ` Andrey Ryabinin
0 siblings, 1 reply; 9+ messages in thread
From: Ingo Molnar @ 2015-07-01 9:07 UTC
To: Andrey Ryabinin
Cc: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86,
Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, linux-kernel, stable
* Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
> diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
> index 5a46681..f129a9a 100644
> --- a/arch/x86/kernel/head64.c
> +++ b/arch/x86/kernel/head64.c
> @@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
> /* Kill off the identity-map trampoline */
> reset_early_page_tables();
>
> - kasan_map_early_shadow(early_level4_pgt);
> -
> - /* clear bss before set_intr_gate with early_idt_handler */
> clear_bss();
>
> + clear_page(init_level4_pgt);
> +
> + kasan_early_init();
> +
> for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
> set_intr_gate(i, early_idt_handler_array[i]);
> load_idt((const struct desc_ptr *)&idt_descr);
> @@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
> */
> load_ucode_bsp();
>
> - clear_page(init_level4_pgt);
> /* set init_level4_pgt kernel high mapping*/
> init_level4_pgt[511] = early_level4_pgt[511];
>
> - kasan_map_early_shadow(init_level4_pgt);
> -
> x86_64_start_reservations(real_mode_data);
> }
>
So this changes generic code (moves the clear_page(init_level4_pgt) call), but the
changelog claims it's a KASAN-specific change.
Please split this into two patches: the first one does the generic change, the
second one the KASAN specific one.
That way if anything breaks in the generic code due to this change we have a
simple commit to bisect to.
Thanks,
Ingo
* Re: [PATCH 1/5] x86_64: fix kasan shadow region page tables
2015-07-01 9:07 ` Ingo Molnar
@ 2015-07-01 12:13 ` Andrey Ryabinin
2015-07-02 7:56 ` Ingo Molnar
0 siblings, 1 reply; 9+ messages in thread
From: Andrey Ryabinin @ 2015-07-01 12:13 UTC
To: Ingo Molnar
Cc: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86,
Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, linux-kernel, stable
On 07/01/2015 12:07 PM, Ingo Molnar wrote:
>
> * Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
>
>> diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
>> index 5a46681..f129a9a 100644
>> --- a/arch/x86/kernel/head64.c
>> +++ b/arch/x86/kernel/head64.c
>> @@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
>> /* Kill off the identity-map trampoline */
>> reset_early_page_tables();
>>
>> - kasan_map_early_shadow(early_level4_pgt);
>> -
>> - /* clear bss before set_intr_gate with early_idt_handler */
>> clear_bss();
>>
>> + clear_page(init_level4_pgt);
>> +
>> + kasan_early_init();
>> +
>> for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
>> set_intr_gate(i, early_idt_handler_array[i]);
>> load_idt((const struct desc_ptr *)&idt_descr);
>> @@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
>> */
>> load_ucode_bsp();
>>
>> - clear_page(init_level4_pgt);
>> /* set init_level4_pgt kernel high mapping*/
>> init_level4_pgt[511] = early_level4_pgt[511];
>>
>> - kasan_map_early_shadow(init_level4_pgt);
>> -
>> x86_64_start_reservations(real_mode_data);
>> }
>>
>
> So this changes generic code (moves the clear_page(init_level4_pgt) call), but the
> changelog claims it's a KASAN-specific change.
>
> Please split this into two patches: the first one does the generic change, the
> second one the KASAN specific one.
>
Hm... We will need to backport that generic change to stable, because the second change depends on it.
So, maybe split this into three changes:
#1 fix the kasan page tables (without touching clear_page() or kasan_map_early_shadow(init_level4_pgt))
#2 the generic clear_page() move
#3 clean up the kasan initialization (move kasan_map_early_shadow(init_level4_pgt)).
Only #1 would have the stable tag. Does that make sense?
* Re: [PATCH 1/5] x86_64: fix kasan shadow region page tables
2015-07-01 12:13 ` Andrey Ryabinin
@ 2015-07-02 7:56 ` Ingo Molnar
0 siblings, 0 replies; 9+ messages in thread
From: Ingo Molnar @ 2015-07-02 7:56 UTC
To: Andrey Ryabinin
Cc: Ingo Molnar, H. Peter Anvin, Thomas Gleixner, x86,
Andrey Konovalov, Andrew Morton, Borislav Petkov, Alexander Popov,
Dmitry Vyukov, Alexander Potapenko, linux-kernel, stable
* Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
> On 07/01/2015 12:07 PM, Ingo Molnar wrote:
> >
> > * Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
> >
> >> diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
> >> index 5a46681..f129a9a 100644
> >> --- a/arch/x86/kernel/head64.c
> >> +++ b/arch/x86/kernel/head64.c
> >> @@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
> >> /* Kill off the identity-map trampoline */
> >> reset_early_page_tables();
> >>
> >> - kasan_map_early_shadow(early_level4_pgt);
> >> -
> >> - /* clear bss before set_intr_gate with early_idt_handler */
> >> clear_bss();
> >>
> >> + clear_page(init_level4_pgt);
> >> +
> >> + kasan_early_init();
> >> +
> >> for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
> >> set_intr_gate(i, early_idt_handler_array[i]);
> >> load_idt((const struct desc_ptr *)&idt_descr);
> >> @@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
> >> */
> >> load_ucode_bsp();
> >>
> >> - clear_page(init_level4_pgt);
> >> /* set init_level4_pgt kernel high mapping*/
> >> init_level4_pgt[511] = early_level4_pgt[511];
> >>
> >> - kasan_map_early_shadow(init_level4_pgt);
> >> -
> >> x86_64_start_reservations(real_mode_data);
> >> }
> >>
> >
> > So this changes generic code (moves the clear_page(init_level4_pgt) call), but the
> > changelog claims it's a KASAN-specific change.
> >
> > Please split this into two patches: the first one does the generic change, the
> > second one the KASAN specific one.
> >
>
> Hm... We will need to backport that generic change to stable, because the second change depends on it.
> So, maybe split this into three changes:
>
> #1 fix the kasan page tables (without touching clear_page() or kasan_map_early_shadow(init_level4_pgt))
> #2 the generic clear_page() move
> #3 clean up the kasan initialization (move kasan_map_early_shadow(init_level4_pgt)).
>
> Only #1 would have the stable tag. Does that make sense?
I wouldn't overcomplicate it - just split it up and both patches can get the
stable tag just fine...
Thanks,
Ingo