From mboxrd@z Thu Jan 1 00:00:00 1970 From: keescook@chromium.org (Kees Cook) Date: Thu, 13 Feb 2014 17:04:09 -0800 Subject: [PATCH 1/2] ARM: mm: allow for stricter kernel memory perms In-Reply-To: <1392339850-18686-1-git-send-email-keescook@chromium.org> References: <1392339850-18686-1-git-send-email-keescook@chromium.org> Message-ID: <1392339850-18686-2-git-send-email-keescook@chromium.org> To: linux-arm-kernel@lists.infradead.org List-Id: linux-arm-kernel.lists.infradead.org Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions into section-sized areas that can have different permissions. Performs the permission changes during free_initmem. This uses section size instead of PMD size to reduce memory caps on non-LPAE systems. Based on work by Brad Spengler, Larry Bassel, and Laura Abbott. Signed-off-by: Kees Cook --- arch/arm/kernel/vmlinux.lds.S | 17 +++++++++ arch/arm/mm/Kconfig | 10 +++++ arch/arm/mm/init.c | 84 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+) diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 7bcee5c9b604..08fa667ef2f1 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -8,6 +8,9 @@ #include #include #include +#ifdef CONFIG_ARM_KERNMEM_PERMS +#include +#endif #define PROC_INFO \ . = ALIGN(4); \ @@ -90,6 +93,11 @@ SECTIONS _text = .; HEAD_TEXT } + +#ifdef CONFIG_ARM_KERNMEM_PERMS + . = ALIGN(1< #include +#ifdef CONFIG_ARM_KERNMEM_PERMS +#include +#include +#endif + #include #include @@ -621,11 +626,90 @@ void __init mem_init(void) } } +#ifdef CONFIG_ARM_KERNMEM_PERMS +struct section_perm { + unsigned long start; + unsigned long end; + pmdval_t prot; +}; + +struct section_perm __initdata section_perms[] = { + /* Make pages tables, etc before _stext RW (set NX). */ + { + .start = PAGE_OFFSET, + .end = (unsigned long)_stext, + .prot = PMD_SECT_XN, + }, + /* Make init RW (set NX). 
*/ + { + .start = (unsigned long)__init_begin, + .end = (unsigned long)_sdata, + .prot = PMD_SECT_XN, + }, + /* Make kernel code and rodata RX (set RO). */ + { + .start = (unsigned long)_stext, + .end = (unsigned long)__init_begin, +#ifdef CONFIG_ARM_LPAE + .prot = PMD_SECT_RDONLY, +#else + .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, +#endif + }, +}; + +static inline void section_update(unsigned long addr, pmdval_t prot) +{ + pmd_t *pmd = pmd_off_k(addr); + +#ifdef CONFIG_ARM_LPAE + pmd[0] = __pmd(pmd_val(pmd[0]) | prot); +#else + if (addr & SECTION_SIZE) + pmd[1] = __pmd(pmd_val(pmd[1]) | prot); + else + pmd[0] = __pmd(pmd_val(pmd[0]) | prot); +#endif + flush_pmd_entry(pmd); +} + +static inline void fix_kernmem_perms(void) +{ + unsigned long addr; + int cpu_arch = cpu_architecture(); + unsigned int i, cr = get_cr(); + + if (cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) + return; + + for (i = 0; i < ARRAY_SIZE(section_perms); i++) { + if (!IS_ALIGNED(section_perms[i].start, SECTION_SIZE) || + !IS_ALIGNED(section_perms[i].end, SECTION_SIZE)) { + pr_err("BUG: section %lx-%lx not aligned to %lx\n", + section_perms[i].start, section_perms[i].end, + SECTION_SIZE); + continue; + } + + for (addr = section_perms[i].start; + addr < section_perms[i].end; + addr += SECTION_SIZE) + section_update(addr, section_perms[i].prot); + } +} +#else +static inline void fix_kernmem_perms(void) { } +#endif /* CONFIG_ARM_KERNMEM_PERMS */ + void free_initmem(void) { #ifdef CONFIG_HAVE_TCM extern char __tcm_start, __tcm_end; +#endif + + fix_kernmem_perms(); +#ifdef CONFIG_HAVE_TCM poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); #endif -- 1.7.9.5