* [PATCH v3 1/6] Add TTBR operator for kasan_init
2018-04-02 12:04 [PATCH v3 0/6] KASan for arm Abbott Liu
@ 2018-04-02 12:04 ` Abbott Liu
2018-04-02 12:04 ` [PATCH v3 2/6] Disable instrumentation for some code Abbott Liu
` (4 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Abbott Liu @ 2018-04-02 12:04 UTC (permalink / raw)
To: linux-arm-kernel
The purpose of this patch is to provide set_ttbr0()/get_ttbr0() to the
kasan_init function. The definitions of the cp15 registers belong in
arch/arm/include/asm/cp15.h rather than arch/arm/include/asm/kvm_hyp.h,
so move them there.
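For reference, the KASan initialization code added later in this series
(patch 5/6) uses these helpers roughly as follows (simplified sketch;
LPAE handling and cache/TLB maintenance omitted):

	u64 orig_ttbr0 = get_ttbr0();          /* save the live translation table base */
	set_ttbr0((u64)__pa(tmp_pgd_table));   /* run on a temporary pgd while the real
	                                          shadow mapping is built */
	/* ... populate the shadow region ... */
	set_ttbr0(orig_ttbr0);                 /* restore the original TTBR0 */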
Cc: Andrey Ryabinin <a.ryabinin@samsung.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Russell King - ARM Linux <linux@armlinux.org.uk>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Joel Stanley <joel@jms.id.au>
Tested-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
---
arch/arm/include/asm/cp15.h | 104 +++++++++++++++++++++++++++++++++++++++++
arch/arm/include/asm/kvm_hyp.h | 52 ---------------------
arch/arm/kvm/hyp/cp15-sr.c | 12 ++---
arch/arm/kvm/hyp/switch.c | 6 +--
4 files changed, 113 insertions(+), 61 deletions(-)
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 4c9fa72..99ebb31 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -3,6 +3,7 @@
#define __ASM_ARM_CP15_H
#include <asm/barrier.h>
+#include <linux/stringify.h>
/*
* CR1 bits (CP#15 CR1)
@@ -65,8 +66,111 @@
#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+#define TTBR0_32 __ACCESS_CP15(c2, 0, c0, 0)
+#define TTBR1_32 __ACCESS_CP15(c2, 0, c0, 1)
+#define PAR_32 __ACCESS_CP15(c7, 0, c4, 0)
+#define TTBR0_64 __ACCESS_CP15_64(0, c2)
+#define TTBR1_64 __ACCESS_CP15_64(1, c2)
+#define PAR_64 __ACCESS_CP15_64(0, c7)
+#define VTTBR __ACCESS_CP15_64(6, c2)
+#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
+#define CNTVOFF __ACCESS_CP15_64(4, c14)
+
+#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
+#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
+#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
+#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
+#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
+#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
+#define HCR __ACCESS_CP15(c1, 4, c1, 0)
+#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
+#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
+#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
+#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
+#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
+#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
+#define DACR __ACCESS_CP15(c3, 0, c0, 0)
+#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
+#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
+#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
+#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
+#define HSR __ACCESS_CP15(c5, 4, c2, 0)
+#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
+#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
+#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
+#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
+#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
+#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
+#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
+#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
+#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
+#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
+#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
+#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
+#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
+#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
+#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
+#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
+#define CID __ACCESS_CP15(c13, 0, c0, 1)
+#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
+#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
+#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
+#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
+#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
+#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
+#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
+static inline void set_par(u64 val)
+{
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ write_sysreg(val, PAR_64);
+ else
+ write_sysreg(val, PAR_32);
+}
+
+static inline u64 get_par(void)
+{
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ return read_sysreg(PAR_64);
+ else
+ return read_sysreg(PAR_32);
+}
+
+static inline void set_ttbr0(u64 val)
+{
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ write_sysreg(val, TTBR0_64);
+ else
+ write_sysreg(val, TTBR0_32);
+}
+
+static inline u64 get_ttbr0(void)
+{
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ return read_sysreg(TTBR0_64);
+ else
+ return read_sysreg(TTBR0_32);
+}
+
+static inline void set_ttbr1(u64 val)
+{
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ write_sysreg(val, TTBR1_64);
+ else
+ write_sysreg(val, TTBR1_32);
+}
+
+static inline u64 get_ttbr1(void)
+{
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ return read_sysreg(TTBR1_64);
+ else
+ return read_sysreg(TTBR1_32);
+}
+
static inline unsigned long get_cr(void)
{
unsigned long val;
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 1ab8329..8e8592e 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -36,58 +36,6 @@
__val; \
})
-#define TTBR0 __ACCESS_CP15_64(0, c2)
-#define TTBR1 __ACCESS_CP15_64(1, c2)
-#define VTTBR __ACCESS_CP15_64(6, c2)
-#define PAR __ACCESS_CP15_64(0, c7)
-#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
-#define CNTVOFF __ACCESS_CP15_64(4, c14)
-
-#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
-#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
-#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
-#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
-#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
-#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
-#define HCR __ACCESS_CP15(c1, 4, c1, 0)
-#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
-#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
-#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
-#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
-#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
-#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
-#define DACR __ACCESS_CP15(c3, 0, c0, 0)
-#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
-#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
-#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
-#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
-#define HSR __ACCESS_CP15(c5, 4, c2, 0)
-#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
-#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
-#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
-#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
-#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
-#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
-#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
-#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
-#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
-#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
-#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
-#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
-#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
-#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
-#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
-#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
-#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
-#define CID __ACCESS_CP15(c13, 0, c0, 1)
-#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
-#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
-#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
-#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
-#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
-#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
-#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
-
#define VFP_FPEXC __ACCESS_VFP(FPEXC)
/* AArch64 compatibility macros, only for the timer so far */
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
index c478281..d365e3c 100644
--- a/arch/arm/kvm/hyp/cp15-sr.c
+++ b/arch/arm/kvm/hyp/cp15-sr.c
@@ -31,8 +31,8 @@ void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
- *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
- *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
+ *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0_64);
+ *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1_64);
ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
ctxt->cp15[c3_DACR] = read_sysreg(DACR);
ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
@@ -41,7 +41,7 @@ void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
- *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
+ *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR_64);
ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
@@ -60,8 +60,8 @@ void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
- write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
- write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
+ write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0_64);
+ write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1_64);
write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
write_sysreg(ctxt->cp15[c3_DACR], DACR);
write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
@@ -70,7 +70,7 @@ void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
- write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
+ write_sysreg(*cp15_64(ctxt, c7_PAR), PAR_64);
write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index ae45ae9..94d5bb9 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -134,12 +134,12 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
u64 par, tmp;
- par = read_sysreg(PAR);
+ par = read_sysreg(PAR_64);
write_sysreg(far, ATS1CPR);
isb();
- tmp = read_sysreg(PAR);
- write_sysreg(par, PAR);
+ tmp = read_sysreg(PAR_64);
+ write_sysreg(par, PAR_64);
if (unlikely(tmp & 1))
return false; /* Translation failed, back to guest */
--
2.9.0
* [PATCH v3 3/6] Replace memory function for kasan
2018-04-02 12:04 [PATCH v3 0/6] KASan for arm Abbott Liu
2018-04-02 12:04 ` [PATCH v3 1/6] Add TTBR operator for kasan_init Abbott Liu
2018-04-02 12:04 ` [PATCH v3 2/6] Disable instrumentation for some code Abbott Liu
@ 2018-04-02 12:04 ` Abbott Liu
2018-04-02 12:04 ` [PATCH v3 4/6] Define the virtual space of KASan's shadow region Abbott Liu
` (2 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Abbott Liu @ 2018-04-02 12:04 UTC (permalink / raw)
To: linux-arm-kernel
From: Andrey Ryabinin <a.ryabinin@samsung.com>
Functions like memset/memmove/memcpy do a lot of memory accesses.
If bad pointer passed to one of these function it is important
to catch this. Compiler's instrumentation cannot do this since
these functions are written in assembly.
KASan replaces memory functions with manually instrumented variants.
Original functions declared as weak symbols so strong definitions
in mm/kasan/kasan.c could replace them. Original functions have aliases
with '__' prefix in name, so we could call non-instrumented variant
if needed.
We must use __memcpy/__memset to replace memcpy/memset when we copy
.data to RAM and when we clear .bss, because kasan_early_init can't
be called before the initialization of .data and .bss.
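The generic side of this arrangement, the strong instrumented definition
in mm/kasan/kasan.c, looks roughly like this (sketch for illustration):

	void *memset(void *addr, int c, size_t len)
	{
		/* validate the whole range against the shadow first */
		check_memory_region((unsigned long)addr, len, true, _RET_IP_);
		/* then fall through to the raw assembly implementation */
		return __memset(addr, c, len);
	}

so every memset() call in instrumented code is checked against the shadow,
while __memset() remains available for code that must not be instrumented.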
Reviewed-by: Russell King - ARM Linux <linux@armlinux.org.uk>
Acked-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Joel Stanley <joel@jms.id.au>
Tested-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
---
arch/arm/boot/compressed/decompress.c | 2 ++
arch/arm/boot/compressed/libfdt_env.h | 2 ++
arch/arm/include/asm/string.h | 17 +++++++++++++++++
arch/arm/kernel/head-common.S | 4 ++--
arch/arm/lib/memcpy.S | 3 +++
arch/arm/lib/memmove.S | 5 ++++-
arch/arm/lib/memset.S | 3 +++
7 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index a2ac3fe..0596077 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -49,8 +49,10 @@ extern int memcmp(const void *cs, const void *ct, size_t count);
#endif
#ifdef CONFIG_KERNEL_XZ
+#ifndef CONFIG_KASAN
#define memmove memmove
#define memcpy memcpy
+#endif
#include "../../../../lib/decompress_unxz.c"
#endif
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index 0743781..736ed36 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -17,4 +17,6 @@ typedef __be64 fdt64_t;
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
+#undef memset
+
#endif
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 111a1d8..1f9016b 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -15,15 +15,18 @@ extern char * strchr(const char * s, int c);
#define __HAVE_ARCH_MEMCPY
extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMMOVE
extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
#define __HAVE_ARCH_MEMCHR
extern void * memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
#define __HAVE_ARCH_MEMSET32
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
@@ -39,4 +42,18 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8, v >> 32);
}
+
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
#endif
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e..c79b829 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -99,7 +99,7 @@ __mmap_switched:
THUMB( ldmia r4!, {r0, r1, r2, r3} )
THUMB( mov sp, r3 )
sub r2, r2, r1
- bl memcpy @ copy .data to RAM
+ bl __memcpy @ copy .data to RAM
#endif
ARM( ldmia r4!, {r0, r1, sp} )
@@ -107,7 +107,7 @@ __mmap_switched:
THUMB( mov sp, r3 )
sub r2, r1, r0
mov r1, #0
- bl memset @ clear .bss
+ bl __memset @ clear .bss
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 64111bd..79a83f8 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,6 +61,8 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+.weak memcpy
+ENTRY(__memcpy)
ENTRY(mmiocpy)
ENTRY(memcpy)
@@ -68,3 +70,4 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
+ENDPROC(__memcpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 69a9d47..313db6c 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -27,12 +27,14 @@
* occurring in the opposite direction.
*/
+.weak memmove
+ENTRY(__memmove)
ENTRY(memmove)
UNWIND( .fnstart )
subs ip, r0, r1
cmphi r2, ip
- bls memcpy
+ bls __memcpy
stmfd sp!, {r0, r4, lr}
UNWIND( .fnend )
@@ -225,3 +227,4 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
+ENDPROC(__memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index ed6d35d..64aa06a 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,8 @@
.text
.align 5
+.weak memset
+ENTRY(__memset)
ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
@@ -135,6 +137,7 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
+ENDPROC(__memset)
ENTRY(__memset32)
UNWIND( .fnstart )
--
2.9.0
* [PATCH v3 4/6] Define the virtual space of KASan's shadow region
2018-04-02 12:04 [PATCH v3 0/6] KASan for arm Abbott Liu
` (2 preceding siblings ...)
2018-04-02 12:04 ` [PATCH v3 3/6] Replace memory function for kasan Abbott Liu
@ 2018-04-02 12:04 ` Abbott Liu
2018-04-02 12:04 ` [PATCH v3 5/6] Initialize the mapping of KASan shadow memory Abbott Liu
2018-04-02 12:04 ` [PATCH v3 6/6] Enable KASan for arm Abbott Liu
5 siblings, 0 replies; 12+ messages in thread
From: Abbott Liu @ 2018-04-02 12:04 UTC (permalink / raw)
To: linux-arm-kernel
Define KASAN_SHADOW_OFFSET, KASAN_SHADOW_START and KASAN_SHADOW_END for
the ARM kernel address sanitizer:
    +----+ 0xffffffff
    |    |
    |    |
    |    |
    +----+ CONFIG_PAGE_OFFSET
    |    |\
    |    | |-> module virtual address space area.
    |    |/
    +----+ MODULE_VADDR = KASAN_SHADOW_END
    |    |\
    |    | |-> the shadow area of kernel virtual address.
    |    |/
    +----+ TASK_SIZE(start of kernel space) = KASAN_SHADOW_START the
    |    |\                                   shadow address of MODULE_VADDR
    |    | ---------------------+
    |    |                      |
    +    + KASAN_SHADOW_OFFSET  |-> the user space area. Kernel address
    |    |                      |   sanitizer do not use this space.
    |    | ---------------------+
    |    |/
    ------ 0
1) KASAN_SHADOW_OFFSET:
   This value is used to map an address to the corresponding shadow
   address by the following formula:
       shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;

2) KASAN_SHADOW_START:
   This value is the shadow address of MODULE_VADDR. It is the start
   of the kernel virtual address space.

3) KASAN_SHADOW_END:
   This value is the shadow address of 0x100000000. It is the end of
   the kernel address sanitizer's shadow area. It is also the start of
   the module area.

When KASan is enabled, the definition of TASK_SIZE is no longer an
8-bit rotated constant, so we have to modify the TASK_SIZE access code
in the *.S files.
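As a concrete example (assuming the common CONFIG_PAGE_OFFSET of
0xC0000000), the values work out to:

	KASAN_SHADOW_END    = 0xC0000000 - SZ_16M          = 0xBF000000
	KASAN_SHADOW_OFFSET = 0xBF000000 - (1 << 29)       = 0x9F000000
	KASAN_SHADOW_START  = (0xBF000000 >> 3) + 0x9F000000 = 0xB6E00000 = TASK_SIZE

so the shadow of MODULE_VADDR, (0xBF000000 >> 3) + KASAN_SHADOW_OFFSET,
lands exactly on KASAN_SHADOW_START, and the shadow of 0x100000000,
(1 << 29) + KASAN_SHADOW_OFFSET, lands exactly on KASAN_SHADOW_END.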
Cc: Andrey Ryabinin <a.ryabinin@samsung.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Russell King - ARM Linux <linux@armlinux.org.uk>
Tested-by: Joel Stanley <joel@jms.id.au>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
---
arch/arm/include/asm/kasan_def.h | 64 ++++++++++++++++++++++++++++++++++++++++
arch/arm/include/asm/memory.h | 5 ++++
arch/arm/kernel/entry-armv.S | 5 ++--
arch/arm/kernel/entry-common.S | 9 ++++--
arch/arm/mm/init.c | 6 ++++
arch/arm/mm/mmu.c | 7 ++++-
6 files changed, 90 insertions(+), 6 deletions(-)
create mode 100644 arch/arm/include/asm/kasan_def.h
diff --git a/arch/arm/include/asm/kasan_def.h b/arch/arm/include/asm/kasan_def.h
new file mode 100644
index 0000000..7b7f424
--- /dev/null
+++ b/arch/arm/include/asm/kasan_def.h
@@ -0,0 +1,64 @@
+/*
+ * arch/arm/include/asm/kasan_def.h
+ *
+ * Copyright (c) 2018 Huawei Technologies Co., Ltd.
+ *
+ * Author: Abbott Liu <liuwenliang@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_KASAN_DEF_H
+#define __ASM_KASAN_DEF_H
+
+#ifdef CONFIG_KASAN
+
+/*
+ * +----+ 0xffffffff
+ * | |
+ * | |
+ * | |
+ * +----+ CONFIG_PAGE_OFFSET
+ * | |\
+ * | | |-> module virtual address space area.
+ * | |/
+ * +----+ MODULE_VADDR = KASAN_SHADOW_END
+ * | |\
+ * | | |-> the shadow area of kernel virtual address.
+ * | |/
+ * +----+ TASK_SIZE(start of kernel space) = KASAN_SHADOW_START the
+ * | |\ shadow address of MODULE_VADDR
+ * | | ---------------------+
+ * | | |
+ * + + KASAN_SHADOW_OFFSET |-> the user space area. Kernel address
+ * | | | sanitizer do not use this space.
+ * | | ---------------------+
+ * | |/
+ * ------ 0
+ *
+ *1)KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ * shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * 2)KASAN_SHADOW_START
+ * This value is the MODULE_VADDR's shadow address. It is the start
+ * of kernel virtual space.
+ *
+ * 3) KASAN_SHADOW_END
+ * This value is the 0x100000000's shadow address. It is the end of
+ * kernel addresssanitizer's shadow area. It is also the start of the
+ * module area.
+ *
+ */
+
+#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1<<29))
+
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END >> 3) + KASAN_SHADOW_OFFSET)
+
+#define KASAN_SHADOW_END (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+
+#endif
+#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4966677..3ce1a9a 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -21,6 +21,7 @@
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
+#include <asm/kasan_def.h>
/*
* Allow for constants defined here to be used from assembly code
@@ -37,7 +38,11 @@
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
+#ifndef CONFIG_KASAN
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE (KASAN_SHADOW_START)
+#endif
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
/*
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1752033..b4de9e4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -183,7 +183,7 @@ ENDPROC(__und_invalid)
get_thread_info tsk
ldr r0, [tsk, #TI_ADDR_LIMIT]
- mov r1, #TASK_SIZE
+ ldr r1, =TASK_SIZE
str r1, [tsk, #TI_ADDR_LIMIT]
str r0, [sp, #SVC_ADDR_LIMIT]
@@ -437,7 +437,8 @@ ENDPROC(__fiq_abt)
@ if it was interrupted in a critical region. Here we
@ perform a quick test inline since it should be false
@ 99.9999% of the time. The rest is done out of line.
- cmp r4, #TASK_SIZE
+ ldr r0, =TASK_SIZE
+ cmp r4, r0
blhs kuser_cmpxchg64_fixup
#endif
#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3c4f887..78046de 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,7 +51,8 @@ ret_fast_syscall:
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
- cmp r2, #TASK_SIZE
+ ldr r1, =TASK_SIZE
+ cmp r2, r1
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
@@ -81,7 +82,8 @@ ret_fast_syscall:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
- cmp r2, #TASK_SIZE
+ ldr r1, =TASK_SIZE
+ cmp r2, r1
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
@@ -116,7 +118,8 @@ ret_slow_syscall:
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
- cmp r2, #TASK_SIZE
+ ldr r1, =TASK_SIZE
+ cmp r2, r1
blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index bd6f451..da11f61 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -538,6 +538,9 @@ void __init mem_init(void)
#ifdef CONFIG_MODULES
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
+#ifdef CONFIG_KASAN
+ " kasan : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
" .text : 0x%p" " - 0x%p" " (%4td kB)\n"
" .init : 0x%p" " - 0x%p" " (%4td kB)\n"
" .data : 0x%p" " - 0x%p" " (%4td kB)\n"
@@ -558,6 +561,9 @@ void __init mem_init(void)
#ifdef CONFIG_MODULES
MLM(MODULES_VADDR, MODULES_END),
#endif
+#ifdef CONFIG_KASAN
+ MLM(KASAN_SHADOW_START, KASAN_SHADOW_END),
+#endif
MLK_ROUNDUP(_text, _etext),
MLK_ROUNDUP(__init_begin, __init_end),
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e46a6a4..f5aa1de 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1251,9 +1251,14 @@ static inline void prepare_page_table(void)
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
+ for (addr = 0; addr < TASK_SIZE; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
+#ifdef CONFIG_KASAN
+ /*TASK_SIZE ~ MODULES_VADDR is the KASAN's shadow area -- skip over it*/
+ addr = MODULES_VADDR;
+#endif
+
#ifdef CONFIG_XIP_KERNEL
/* The XIP kernel is mapped in the module area -- skip over it */
addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
--
2.9.0
* [PATCH v3 5/6] Initialize the mapping of KASan shadow memory
2018-04-02 12:04 [PATCH v3 0/6] KASan for arm Abbott Liu
` (3 preceding siblings ...)
2018-04-02 12:04 ` [PATCH v3 4/6] Define the virtual space of KASan's shadow region Abbott Liu
@ 2018-04-02 12:04 ` Abbott Liu
2018-04-02 18:08 ` Nicolas Pitre
2018-04-02 12:04 ` [PATCH v3 6/6] Enable KASan for arm Abbott Liu
5 siblings, 1 reply; 12+ messages in thread
From: Abbott Liu @ 2018-04-02 12:04 UTC (permalink / raw)
To: linux-arm-kernel
From: Andrey Ryabinin <a.ryabinin@samsung.com>
This patch initializes the KASan shadow region's page tables and memory.
KASan initialization happens in two stages:

1. At the early boot stage the whole shadow region is mapped to just
   one physical page (kasan_zero_page). This is done by kasan_early_init,
   which is called from __mmap_switched (arch/arm/kernel/head-common.S).
   ---Andrey Ryabinin <a.ryabinin@samsung.com>

2. After paging_init has run, kasan_zero_page is kept as the zero shadow
   for memory that KASan does not need to track, and new shadow space is
   allocated for the memory that KASan does need to track. This is done
   by kasan_init, which is called from setup_arch.
   ---Andrey Ryabinin <a.ryabinin@samsung.com>

Additional changes on top of that:

3. Add support for ARM LPAE. If LPAE is enabled, the KASan shadow
   region's mapping table needs to be copied in the pgd_alloc function.
   ---Abbott Liu <liuwenliang@huawei.com>

4. On 64-bit machines size_t is unsigned long, but on 32-bit machines it
   is unsigned int, so a type conversion is needed in kasan_cache_create.
   ---Abbott Liu <liuwenliang@huawei.com>

5. Move kasan_pte_populate, kasan_pmd_populate, kasan_pud_populate and
   kasan_pgd_populate from the .meminit.text section to the .init.text
   section.
   ---Reported-by: Florian Fainelli <f.fainelli@gmail.com>
   ---Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
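The resulting boot-time call sequence is roughly:

	__mmap_switched                  (arch/arm/kernel/head-common.S)
	  -> kasan_early_init()          stage 1: whole shadow region mapped to
	                                 kasan_zero_page via swapper_pg_dir
	  -> start_kernel()
	       -> setup_arch()
	            -> paging_init(mdesc)
	            -> kasan_init()      stage 2: early shadow cleared, real
	                                 shadow allocated and mapped for
	                                 lowmem and the module area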
Cc: Andrey Ryabinin <a.ryabinin@samsung.com>
Co-Developed-by: Abbott Liu <liuwenliang@huawei.com>
Reviewed-by: Russell King - ARM Linux <linux@armlinux.org.uk>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Reported-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Tested-by: Joel Stanley <joel@jms.id.au>
Tested-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
---
arch/arm/include/asm/kasan.h | 35 +++++
arch/arm/include/asm/pgalloc.h | 7 +-
arch/arm/include/asm/thread_info.h | 4 +
arch/arm/kernel/head-common.S | 3 +
arch/arm/kernel/setup.c | 2 +
arch/arm/mm/Makefile | 3 +
arch/arm/mm/kasan_init.c | 302 +++++++++++++++++++++++++++++++++++++
arch/arm/mm/pgd.c | 14 ++
mm/kasan/kasan.c | 5 +-
9 files changed, 371 insertions(+), 4 deletions(-)
create mode 100644 arch/arm/include/asm/kasan.h
create mode 100644 arch/arm/mm/kasan_init.c
diff --git a/arch/arm/include/asm/kasan.h b/arch/arm/include/asm/kasan.h
new file mode 100644
index 0000000..1801f4d
--- /dev/null
+++ b/arch/arm/include/asm/kasan.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/include/asm/kasan.h
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+
+#include <asm/kasan_def.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+/*
+ * Compiler uses shadow offset assuming that addresses start
+ * from 0. Kernel addresses don't start from 0, so shadow
+ * for kernel really starts from 'compiler's shadow offset' +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT)
+ */
+
+extern void kasan_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 2d7344f..f170659 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -50,8 +50,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
-#define pud_populate(mm,pmd,pte) BUG()
-
+#ifndef CONFIG_KASAN
+#define pud_populate(mm, pmd, pte) BUG()
+#else
+#define pud_populate(mm, pmd, pte) do { } while (0)
+#endif
#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e71cc35..bc681a0 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -16,7 +16,11 @@
#include <asm/fpstate.h>
#include <asm/page.h>
+#ifdef CONFIG_KASAN
+#define THREAD_SIZE_ORDER 2
+#else
#define THREAD_SIZE_ORDER 1
+#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index c79b829..20161e2 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -115,6 +115,9 @@ __mmap_switched:
str r8, [r2] @ Save atags pointer
cmp r3, #0
strne r10, [r3] @ Save control register values
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
mov lr, #0
b start_kernel
ENDPROC(__mmap_switched)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index fc40a2b..81c3e9df 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -62,6 +62,7 @@
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
+#include <asm/kasan.h>
#include "atags.h"
@@ -1118,6 +1119,7 @@ void __init setup_arch(char **cmdline_p)
early_ioremap_reset();
paging_init(mdesc);
+ kasan_init();
request_standard_resources(mdesc);
if (mdesc->restart)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9dbb849..573203e 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -111,3 +111,6 @@ obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
obj-$(CONFIG_CACHE_UNIPHIER) += cache-uniphier.o
+
+KASAN_SANITIZE_kasan_init.o := n
+obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
new file mode 100644
index 0000000..461cc85
--- /dev/null
+++ b/arch/arm/mm/kasan_init.c
@@ -0,0 +1,302 @@
+/*
+ * This file contains kasan initialization code for ARM.
+ *
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/start_kernel.h>
+#include <asm/cputype.h>
+#include <asm/highmem.h>
+#include <asm/mach/map.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/procinfo.h>
+#include <asm/proc-fns.h>
+#include <asm/tlbflush.h>
+#include <asm/cp15.h>
+#include <linux/sched/task.h>
+
+#include "mm.h"
+
+static pgd_t tmp_pgd_table[PTRS_PER_PGD] __initdata __aligned(1ULL << 14);
+
+pmd_t tmp_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+
+static __init void *kasan_alloc_block(size_t size, int node)
+{
+ return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
+static void __init kasan_early_pmd_populate(unsigned long start,
+ unsigned long end, pud_t *pud)
+{
+ unsigned long addr;
+ unsigned long next;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, start);
+ for (addr = start; addr < end;) {
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ next = pmd_addr_end(addr, end);
+ addr = next;
+ flush_pmd_entry(pmd);
+ pmd++;
+ }
+}
+
+static void __init kasan_early_pud_populate(unsigned long start,
+ unsigned long end, pgd_t *pgd)
+{
+ unsigned long addr;
+ unsigned long next;
+ pud_t *pud;
+
+ pud = pud_offset(pgd, start);
+ for (addr = start; addr < end;) {
+ next = pud_addr_end(addr, end);
+ kasan_early_pmd_populate(addr, next, pud);
+ addr = next;
+ pud++;
+ }
+}
+
+void __init kasan_map_early_shadow(pgd_t *pgdp)
+{
+ int i;
+ unsigned long start = KASAN_SHADOW_START;
+ unsigned long end = KASAN_SHADOW_END;
+ unsigned long addr;
+ unsigned long next;
+ pgd_t *pgd;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
+ &kasan_zero_pte[i], pfn_pte(
+ virt_to_pfn(kasan_zero_page),
+ __pgprot(_L_PTE_DEFAULT | L_PTE_DIRTY
+ | L_PTE_XN)));
+
+ pgd = pgd_offset_k(start);
+ for (addr = start; addr < end;) {
+ next = pgd_addr_end(addr, end);
+ kasan_early_pud_populate(addr, next, pgd);
+ addr = next;
+ pgd++;
+ }
+}
+
+extern struct proc_info_list *lookup_processor_type(unsigned int);
+
+void __init kasan_early_init(void)
+{
+ struct proc_info_list *list;
+
+ /*
+ * locate processor in the list of supported processor
+ * types. The linker builds this table for us from the
+ * entries in arch/arm/mm/proc-*.S
+ */
+ list = lookup_processor_type(read_cpuid_id());
+ if (list) {
+#ifdef MULTI_CPU
+ processor = *list->proc;
+#endif
+ }
+
+ BUILD_BUG_ON((KASAN_SHADOW_END - (1UL << 29)) != KASAN_SHADOW_OFFSET);
+ kasan_map_early_shadow(swapper_pg_dir);
+}
+
+static void __init clear_pgds(unsigned long start,
+ unsigned long end)
+{
+ for (; start && start < end; start += PMD_SIZE)
+ pmd_clear(pmd_off_k(start));
+}
+
+pte_t * __init kasan_pte_populate(pmd_t *pmd, unsigned long addr, int node)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+
+ if (pte_none(*pte)) {
+ pte_t entry;
+ void *p = kasan_alloc_block(PAGE_SIZE, node);
+
+ if (!p)
+ return NULL;
+ entry = pfn_pte(virt_to_pfn(p),
+ __pgprot(pgprot_val(PAGE_KERNEL)));
+ set_pte_at(&init_mm, addr, pte, entry);
+ }
+ return pte;
+}
+
+pmd_t * __init kasan_pmd_populate(pud_t *pud, unsigned long addr, int node)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+
+ if (pmd_none(*pmd)) {
+ void *p = kasan_alloc_block(PAGE_SIZE, node);
+
+ if (!p)
+ return NULL;
+ pmd_populate_kernel(&init_mm, pmd, p);
+ }
+ return pmd;
+}
+
+pud_t * __init kasan_pud_populate(pgd_t *pgd, unsigned long addr, int node)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+
+ if (pud_none(*pud)) {
+ void *p = kasan_alloc_block(PAGE_SIZE, node);
+
+ if (!p)
+ return NULL;
+ pr_err("populating pud addr %lx\n", addr);
+ pud_populate(&init_mm, pud, p);
+ }
+ return pud;
+}
+
+pgd_t * __init kasan_pgd_populate(unsigned long addr, int node)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+
+ if (pgd_none(*pgd)) {
+ void *p = kasan_alloc_block(PAGE_SIZE, node);
+
+ if (!p)
+ return NULL;
+ pgd_populate(&init_mm, pgd, p);
+ }
+ return pgd;
+}
+
+static int __init create_mapping(unsigned long start, unsigned long end,
+ int node)
+{
+ unsigned long addr = start;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pr_info("populating shadow for %lx, %lx\n", start, end);
+
+ for (; addr < end; addr += PAGE_SIZE) {
+ pgd = kasan_pgd_populate(addr, node);
+ if (!pgd)
+ return -ENOMEM;
+
+ pud = kasan_pud_populate(pgd, addr, node);
+ if (!pud)
+ return -ENOMEM;
+
+ pmd = kasan_pmd_populate(pud, addr, node);
+ if (!pmd)
+ return -ENOMEM;
+
+ pte = kasan_pte_populate(pmd, addr, node);
+ if (!pte)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+
+void __init kasan_init(void)
+{
+ struct memblock_region *reg;
+ u64 orig_ttbr0;
+ int i;
+
+ /*
+ * We are going to perform proper setup of shadow memory.
+ * At first we should unmap early shadow (clear_pgds() call below).
+ * However, instrumented code couldn't execute without shadow memory.
+ * tmp_pgd_table and tmp_pmd_table used to keep early shadow mapped
+ * until full shadow setup will be finished.
+ */
+ orig_ttbr0 = get_ttbr0();
+
+#ifdef CONFIG_ARM_LPAE
+ memcpy(tmp_pmd_table,
+ pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)),
+ sizeof(tmp_pmd_table));
+ memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
+ set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)],
+ __pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+ set_ttbr0(__pa(tmp_pgd_table));
+#else
+ memcpy(tmp_pgd_table, swapper_pg_dir, sizeof(tmp_pgd_table));
+ set_ttbr0((u64)__pa(tmp_pgd_table));
+#endif
+ flush_cache_all();
+ local_flush_bp_all();
+ local_flush_tlb_all();
+
+ clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)-1UL) + 1);
+
+ for_each_memblock(memory, reg) {
+ void *start = __va(reg->base);
+ void *end = __va(reg->base + reg->size);
+
+ if (reg->base + reg->size > arm_lowmem_limit)
+ end = __va(arm_lowmem_limit);
+ if (start >= end)
+ break;
+
+ create_mapping((unsigned long)kasan_mem_to_shadow(start),
+ (unsigned long)kasan_mem_to_shadow(end),
+ NUMA_NO_NODE);
+ }
+
+ /*1.the module's global variable is in MODULES_VADDR ~ MODULES_END,
+ * so we need mapping.
+ *2.PKMAP_BASE ~ PKMAP_BASE+PMD_SIZE's shadow and MODULES_VADDR
+ * ~ MODULES_END's shadow is in the same PMD_SIZE, so we can't
+ * use kasan_populate_zero_shadow.
+ */
+ create_mapping(
+ (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
+
+ (unsigned long)kasan_mem_to_shadow((void *)(PKMAP_BASE +
+ PMD_SIZE)),
+ NUMA_NO_NODE);
+
+ /*
+ * KAsan may reuse the contents of kasan_zero_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+ */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte_at(&init_mm, KASAN_SHADOW_START + i*PAGE_SIZE,
+ &kasan_zero_pte[i],
+ pfn_pte(virt_to_pfn(kasan_zero_page),
+ __pgprot(pgprot_val(PAGE_KERNEL)
+ | L_PTE_RDONLY)));
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+ set_ttbr0(orig_ttbr0);
+ flush_cache_all();
+ local_flush_bp_all();
+ local_flush_tlb_all();
+ pr_info("Kernel address sanitizer initialized\n");
+ init_task.kasan_depth = 0;
+}
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 61e281c..4644a21 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -64,6 +64,20 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
new_pmd = pmd_alloc(mm, new_pud, 0);
if (!new_pmd)
goto no_pmd;
+#ifdef CONFIG_KASAN
+ /*
+ *Copy PMD table for KASAN shadow mappings.
+ */
+ init_pgd = pgd_offset_k(TASK_SIZE);
+ init_pud = pud_offset(init_pgd, TASK_SIZE);
+ init_pmd = pmd_offset(init_pud, TASK_SIZE);
+ new_pmd = pmd_offset(new_pud, TASK_SIZE);
+ memcpy(new_pmd, init_pmd,
+ (pmd_index(MODULES_VADDR)-pmd_index(TASK_SIZE))
+ * sizeof(pmd_t));
+ clean_dcache_area(new_pmd, PTRS_PER_PMD*sizeof(pmd_t));
+#endif
+
#endif
if (!vectors_high()) {
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index e13d911..6d32623 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -358,8 +358,9 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
if (redzone_adjust > 0)
*size += redzone_adjust;
- *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
- optimal_redzone(cache->object_size)));
+ *size = min_t(unsigned long, KMALLOC_MAX_SIZE,
+ max(*size, cache->object_size +
+ optimal_redzone(cache->object_size)));
/*
* If the metadata doesn't fit, don't enable KASAN at all.
--
2.9.0