* [PATCH] arm64: mmu: Place guard page after mapping of kernel image
@ 2017-07-27 17:03 Will Deacon
2017-07-28 8:46 ` Ard Biesheuvel
2017-07-31 10:24 ` Mark Rutland
From: Will Deacon @ 2017-07-27 17:03 UTC
To: linux-arm-kernel
The vast majority of virtual allocations in the vmalloc region are followed
by a guard page, which can help to avoid overrunning from one vma into
another, which may map a read-sensitive device.
This patch adds a guard page to the end of the kernel image mapping (i.e.
following the data/bss segments).
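For context, this mirrors the accounting in the vmalloc core:
get_vm_area_size() treats the final page of a vm_struct as the guard
unless VM_NO_GUARD is set. A sketch of that helper, as found in
include/linux/vmalloc.h around this kernel version:

	static inline size_t get_vm_area_size(const struct vm_struct *area)
	{
		/* return actual size without guard page */
		if (!(area->flags & VM_NO_GUARD))
			return area->size - PAGE_SIZE;
		return area->size;
	}

Hence vma->size is grown by PAGE_SIZE below whenever a guard page is
wanted, and every segment except the final data/bss one passes
VM_NO_GUARD, since each of those is immediately followed by the next
segment's mapping and needs no guard of its own.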
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
arch/arm64/mm/mmu.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 23c2d89a362e..f1eb15e0e864 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -496,7 +496,7 @@ void mark_rodata_ro(void)
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma,
- int flags)
+ int flags, unsigned long vm_flags)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
@@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
early_pgtable_alloc, flags);
+ if (!(vm_flags & VM_NO_GUARD))
+ size += PAGE_SIZE;
+
vma->addr = va_start;
vma->phys_addr = pa_start;
vma->size = size;
- vma->flags = VM_MAP;
+ vma->flags = VM_MAP | vm_flags;
vma->caller = __builtin_return_address(0);
vm_area_add_early(vma);
@@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
*/
- map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+ map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+ VM_NO_GUARD);
map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
- &vmlinux_rodata, NO_CONT_MAPPINGS);
+ &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
- &vmlinux_inittext, 0);
+ &vmlinux_inittext, 0, VM_NO_GUARD);
map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
- &vmlinux_initdata, 0);
- map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
+ &vmlinux_initdata, 0, VM_NO_GUARD);
+ map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
/*
--
2.1.4
* [PATCH] arm64: mmu: Place guard page after mapping of kernel image
2017-07-27 17:03 [PATCH] arm64: mmu: Place guard page after mapping of kernel image Will Deacon
@ 2017-07-28 8:46 ` Ard Biesheuvel
2017-07-31 10:24 ` Mark Rutland
From: Ard Biesheuvel @ 2017-07-28 8:46 UTC
To: linux-arm-kernel
On 27 July 2017 at 18:03, Will Deacon <will.deacon@arm.com> wrote:
> The vast majority of virtual allocations in the vmalloc region are followed
> by a guard page, which can help to avoid overrunning from one vma into
> another, which may map a read-sensitive device.
>
> This patch adds a guard page to the end of the kernel image mapping (i.e.
> following the data/bss segments).
>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Signed-off-by: Will Deacon <will.deacon@arm.com>
> ---
> arch/arm64/mm/mmu.c | 18 +++++++++++-------
> 1 file changed, 11 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 23c2d89a362e..f1eb15e0e864 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -496,7 +496,7 @@ void mark_rodata_ro(void)
>
> static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
> pgprot_t prot, struct vm_struct *vma,
> - int flags)
> + int flags, unsigned long vm_flags)
> {
> phys_addr_t pa_start = __pa_symbol(va_start);
> unsigned long size = va_end - va_start;
> @@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
> __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
> early_pgtable_alloc, flags);
>
> + if (!(vm_flags & VM_NO_GUARD))
> + size += PAGE_SIZE;
> +
> vma->addr = va_start;
> vma->phys_addr = pa_start;
> vma->size = size;
> - vma->flags = VM_MAP;
> + vma->flags = VM_MAP | vm_flags;
> vma->caller = __builtin_return_address(0);
>
> vm_area_add_early(vma);
> @@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
> * Only rodata will be remapped with different permissions later on,
> * all other segments are allowed to use contiguous mappings.
> */
> - map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
> + map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
> + VM_NO_GUARD);
> map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
> - &vmlinux_rodata, NO_CONT_MAPPINGS);
> + &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
> map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
> - &vmlinux_inittext, 0);
> + &vmlinux_inittext, 0, VM_NO_GUARD);
> map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
> - &vmlinux_initdata, 0);
> - map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
> + &vmlinux_initdata, 0, VM_NO_GUARD);
> + map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
>
> if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
> /*
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
* [PATCH] arm64: mmu: Place guard page after mapping of kernel image
2017-07-27 17:03 [PATCH] arm64: mmu: Place guard page after mapping of kernel image Will Deacon
2017-07-28 8:46 ` Ard Biesheuvel
@ 2017-07-31 10:24 ` Mark Rutland
From: Mark Rutland @ 2017-07-31 10:24 UTC
To: linux-arm-kernel
On Thu, Jul 27, 2017 at 06:03:55PM +0100, Will Deacon wrote:
> The vast majority of virtual allocations in the vmalloc region are followed
> by a guard page, which can help to avoid overrunning from one vma into
> another, which may map a read-sensitive device.
>
> This patch adds a guard page to the end of the kernel image mapping (i.e.
> following the data/bss segments).
>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Signed-off-by: Will Deacon <will.deacon@arm.com>
Looks sane to me.
Acked-by: Mark Rutland <mark.rutland@arm.com>
Mark.
> ---
> arch/arm64/mm/mmu.c | 18 +++++++++++-------
> 1 file changed, 11 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 23c2d89a362e..f1eb15e0e864 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -496,7 +496,7 @@ void mark_rodata_ro(void)
>
> static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
> pgprot_t prot, struct vm_struct *vma,
> - int flags)
> + int flags, unsigned long vm_flags)
> {
> phys_addr_t pa_start = __pa_symbol(va_start);
> unsigned long size = va_end - va_start;
> @@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
> __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
> early_pgtable_alloc, flags);
>
> + if (!(vm_flags & VM_NO_GUARD))
> + size += PAGE_SIZE;
> +
> vma->addr = va_start;
> vma->phys_addr = pa_start;
> vma->size = size;
> - vma->flags = VM_MAP;
> + vma->flags = VM_MAP | vm_flags;
> vma->caller = __builtin_return_address(0);
>
> vm_area_add_early(vma);
> @@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
> * Only rodata will be remapped with different permissions later on,
> * all other segments are allowed to use contiguous mappings.
> */
> - map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
> + map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
> + VM_NO_GUARD);
> map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
> - &vmlinux_rodata, NO_CONT_MAPPINGS);
> + &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
> map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
> - &vmlinux_inittext, 0);
> + &vmlinux_inittext, 0, VM_NO_GUARD);
> map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
> - &vmlinux_initdata, 0);
> - map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
> + &vmlinux_initdata, 0, VM_NO_GUARD);
> + map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
>
> if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
> /*
> --
> 2.1.4
>