From mboxrd@z Thu Jan  1 00:00:00 1970
From: Kees Cook <keescook@chromium.org>
To: akpm@linux-foundation.org
Subject: [PATCH v4 02/10] x86: standardize mmap_rnd() usage
Date: Wed, 4 Mar 2015 13:10:46 -0800
Message-Id: <1425503454-7531-3-git-send-email-keescook@chromium.org>
In-Reply-To: <1425503454-7531-1-git-send-email-keescook@chromium.org>
References: <1425503454-7531-1-git-send-email-keescook@chromium.org>
Cc: linux-mips@linux-mips.org, Arun Chandran, Heiko Carstens,
	linux-kernel@vger.kernel.org, Min-Hua Chen, Paul Mackerras,
	Ismael Ripoll, Yann Droneaud, linux-s390@vger.kernel.org,
	Russell King, Andrey Ryabinin, x86@kernel.org,
	Hector Marco-Gisbert, Ingo Molnar, "David A. Long",
	Catalin Marinas, Borislav Petkov, Ben Hutchings, Kees Cook,
	Will Deacon, linux-fsdevel@vger.kernel.org, Alexander Viro,
	Michael Holzheu, linux-arm-kernel@lists.infradead.org,
	Jeff Bailey, Paul Burton, Oleg Nesterov, Ralf Baechle,
	Andy Lutomirski, Vineeth Vijayan, Markos Chandras,
	Jan-Simon Möller, Martin Schwidefsky, linux390@de.ibm.com,
	linuxppc-dev@lists.ozlabs.org, Alex Smith
List-Id: Linux on PowerPC Developers Mail List

In preparation for splitting out ET_DYN ASLR, this refactors the use of
mmap_rnd() to be used similarly to arm, and extracts the checking of
PF_RANDOMIZE.

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 arch/x86/mm/mmap.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index df4552bd239e..ebfa52030d5c 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -67,22 +67,21 @@ static int mmap_is_legacy(void)
 
 static unsigned long mmap_rnd(void)
 {
-	unsigned long rnd = 0;
+	unsigned long rnd;
 
 	/*
-	*  8 bits of randomness in 32bit mmaps, 20 address space bits
-	* 28 bits of randomness in 64bit mmaps, 40 address space bits
-	*/
-	if (current->flags & PF_RANDOMIZE) {
-		if (mmap_is_ia32())
-			rnd = get_random_int() % (1<<8);
-		else
-			rnd = get_random_int() % (1<<28);
-	}
+	 *  8 bits of randomness in 32bit mmaps, 20 address space bits
+	 * 28 bits of randomness in 64bit mmaps, 40 address space bits
+	 */
+	if (mmap_is_ia32())
+		rnd = (unsigned long)get_random_int() % (1<<8);
+	else
+		rnd = (unsigned long)get_random_int() % (1<<28);
+
 	return rnd << PAGE_SHIFT;
 }
 
-static unsigned long mmap_base(void)
+static unsigned long mmap_base(unsigned long rnd)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
 
@@ -91,19 +90,19 @@ static unsigned long mmap_base(void)
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
 
-	return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
 /*
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-static unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(unsigned long rnd)
 {
 	if (mmap_is_ia32())
 		return TASK_UNMAPPED_BASE;
 	else
-		return TASK_UNMAPPED_BASE + mmap_rnd();
+		return TASK_UNMAPPED_BASE + rnd;
 }
 
 /*
@@ -112,13 +111,18 @@ static unsigned long mmap_legacy_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
-	mm->mmap_legacy_base = mmap_legacy_base();
-	mm->mmap_base = mmap_base();
+	unsigned long random_factor = 0UL;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = mmap_rnd();
+
+	mm->mmap_legacy_base = mmap_legacy_base(random_factor);
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mm->mmap_legacy_base;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
+		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
-- 
1.9.1
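
The comment in mmap_rnd() is worth unpacking: the random value is applied in
units of pages (rnd << PAGE_SHIFT), so the 8 or 28 random bits sit on top of
the fixed page-offset bits, giving 20 or 40 randomized address-space bits
respectively. The following standalone userspace sketch of that arithmetic is
not kernel code and assumes the usual x86 PAGE_SHIFT of 12 (4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages, the common x86 setup */

int main(void)
{
	/* 2^8 page-sized slots on 32-bit: 8 + 12 = 20 randomized address bits */
	unsigned long long span32 = 1ULL << (8 + PAGE_SHIFT);
	/* 2^28 page-sized slots on 64-bit: 28 + 12 = 40 randomized address bits */
	unsigned long long span64 = 1ULL << (28 + PAGE_SHIFT);

	printf("32-bit mmap base: 2^8 slots, %llu MiB of jitter\n", span32 >> 20);
	printf("64-bit mmap base: 2^28 slots, %llu GiB of jitter\n", span64 >> 30);
	return 0;
}

In other words, the mmap base can move within roughly a 1 MiB window on 32-bit
and a 1 TiB window on 64-bit. The patch does not change those ranges; it only
hoists the PF_RANDOMIZE check out of mmap_rnd() and into
arch_pick_mmap_layout(), passing the precomputed random_factor down to
mmap_legacy_base() and mmap_base().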