* [PATCH v2 1/1] x86_64: fix KASan shadow region page tables
@ 2015-05-29 13:46 Alexander Popov
2015-05-29 14:51 ` Andrey Ryabinin
0 siblings, 1 reply; 2+ messages in thread
From: Alexander Popov @ 2015-05-29 13:46 UTC (permalink / raw)
To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Andrey Ryabinin,
Andrey Konovalov, Andrew Morton, Kees Cook,
Peter Zijlstra (Intel), Andy Lutomirski, Alexander Kuleshov,
Borislav Petkov, Denys Vlasenko, Alexander Popov, x86,
linux-kernel
KASan shadow region page tables can't be filled statically because
physical addresses in these page tables depend on phys_base.
Initialize KASan shadow region page tables in kasan_early_init().
Signed-off-by: Alexander Popov <alpopov@ptsecurity.com>
---
arch/x86/include/asm/kasan.h | 6 ++----
arch/x86/kernel/head64.c | 2 ++
arch/x86/kernel/head_64.S | 29 -----------------------------
arch/x86/mm/kasan_init_64.c | 31 ++++++++++++++++++++++++++++++-
4 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422..d505f76 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,13 @@
#ifndef __ASSEMBLY__
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
#ifdef CONFIG_KASAN
void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
void __init kasan_init(void);
#else
static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2b55ee6..b5cebca 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,6 +161,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
/* Kill off the identity-map trampoline */
reset_early_page_tables();
+ kasan_early_init();
+
kasan_map_early_shadow(early_level4_pgt);
/* clear bss before set_intr_gate with early_idt_handler */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ae6588b..b5c80c8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -514,38 +514,9 @@ ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT) \
- .rept (COUNT) ; \
- .quad (VAL) ; \
- .endr
-
-NEXT_PAGE(kasan_zero_pte)
- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
- .skip PAGE_SIZE
-#endif
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906..04f0275 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -11,7 +11,19 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_data;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_data;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_data;
+
+/*
+ * This page is used as early shadow. We don't use empty_zero_page
+ * at early stages, stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as zero shadow for large ranges of memory
+ * that are allowed to be accessed, but not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
static int __init map_range(struct range *range)
{
@@ -166,6 +178,23 @@ static struct notifier_block kasan_die_notifier = {
};
#endif
+void __init kasan_early_init(void)
+{
+ int i;
+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | _KERNPG_TABLE;
+ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ kasan_zero_pte[i] = __pte(pte_val);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ kasan_zero_pmd[i] = __pmd(pmd_val);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ kasan_zero_pud[i] = __pud(pud_val);
+}
+
void __init kasan_init(void)
{
int i;
--
1.9.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH v2 1/1] x86_64: fix KASan shadow region page tables
2015-05-29 13:46 [PATCH v2 1/1] x86_64: fix KASan shadow region page tables Alexander Popov
@ 2015-05-29 14:51 ` Andrey Ryabinin
0 siblings, 0 replies; 2+ messages in thread
From: Andrey Ryabinin @ 2015-05-29 14:51 UTC (permalink / raw)
To: Alexander Popov
Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Andrey Konovalov,
Andrew Morton, Kees Cook, Peter Zijlstra (Intel), Andy Lutomirski,
Alexander Kuleshov, Borislav Petkov, Denys Vlasenko, x86,
linux-kernel
On 05/29/2015 04:46 PM, Alexander Popov wrote:
> KASan shadow region page tables can't be filled statically because
> physical addresses in these page tables depend on phys_base.
> Initialize KASan shadow region page tables in kasan_early_init().
>
> Signed-off-by: Alexander Popov <alpopov@ptsecurity.com>
> ---
Thanks. A couple of comments below, otherwise looks good.
>
> -extern unsigned char kasan_zero_page[PAGE_SIZE];
> +static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_data;
> +static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_data;
> +static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_data;
It's better to keep these in bss.
> +/*
> + * This page is used as early shadow. We don't use empty_zero_page
> + * at early stages, stack instrumentation could write some garbage
> + * to this page.
> + * Later we reuse it as zero shadow for large ranges of memory
> + * that are allowed to be accessed, but not instrumented by kasan
> + * (vmalloc/vmemmap ...).
> + */
> +static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
>
> static int __init map_range(struct range *range)
> {
> @@ -166,6 +178,23 @@ static struct notifier_block kasan_die_notifier = {
> };
> #endif
>
> +void __init kasan_early_init(void)
> +{
> + int i;
> + pteval_t pte_val = __pa_nodebug(kasan_zero_page) | _KERNPG_TABLE;
Should be __PAGE_KERNEL.
Yes, currently kasan's ptes have _KERNPG_TABLE flags, but that is wrong.
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2015-05-29 14:51 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-05-29 13:46 [PATCH v2 1/1] x86_64: fix KASan shadow region page tables Alexander Popov
2015-05-29 14:51 ` Andrey Ryabinin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox