From: Mike Rapoport
Subject: [PATCH v2 07/12] x86/mm: simplify init_trampoline() and surrounding logic
Date: Thu, 14 May 2020 20:03:22 +0300
Message-ID: <20200514170327.31389-8-rppt@kernel.org>
In-Reply-To: <20200514170327.31389-1-rppt@kernel.org>
References: <20200514170327.31389-1-rppt@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: Rich Felker, linux-ia64@vger.kernel.org, linux-sh@vger.kernel.org,
	Catalin Marinas, Heiko Carstens, Max Filippov, Guo Ren,
	Matthew Wilcox, sparclinux@vger.kernel.org, linux-hexagon@vger.kernel.org,
	linux-riscv@lists.infradead.org, Vincent Chen, Will Deacon,
	Greg Ungerer, linux-arch@vger.kernel.org, linux-s390@vger.kernel.org,
	linux-c6x-dev@linux-c6x.org, Brian Cain, Michael Ellerman,
	Helge Deller, x86@kernel.org, Russell King, Ley Foon Tan,
	Mike Rapoport, Ingo Molnar, Geert Uytterhoeven, linux-parisc@vger.ker

From: Mike Rapoport

There are three cases for the trampoline initialization:
* 32-bit does nothing
* 64-bit with KASLR disabled simply copies a PGD entry from the direct map
  to the trampoline PGD
* 64-bit with KASLR enabled maps the real mode trampoline at PUD level

These cases are currently differentiated by a bunch of ifdefs inside
arch/x86/include/asm/pgtable.h, and the 64-bit KASLR case uses the
pgd_index() helper.
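
For reference, a simplified sketch of how the three cases collapse into a
single helper; this only mirrors the hunk added to arch/x86/mm/init.c in
the diff below and is illustrative, not the literal code:

	static void __init init_trampoline(void)
	{
	#ifdef CONFIG_X86_64
		if (!kaslr_memory_enabled())
			/* no KASLR: reuse the PGD entry covering the direct map */
			trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
		else
			/* KASLR: map the low 1M trampoline area at PUD level */
			init_trampoline_kaslr();
	#endif	/* 32-bit: nothing to do */
	}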
Replacing the ifdefs with a static function in arch/x86/mm/init.c gives
clearer code and allows moving pgd_index() to the generic implementation
in include/linux/pgtable.h.

Signed-off-by: Mike Rapoport
---
 arch/x86/include/asm/kaslr.h   |  2 ++
 arch/x86/include/asm/pgtable.h | 15 +--------------
 arch/x86/include/asm/setup.h   |  9 +++++++++
 arch/x86/mm/init.c             | 22 ++++++++++++++++++++++
 arch/x86/mm/kaslr.c            | 33 +--------------------------------
 5 files changed, 35 insertions(+), 46 deletions(-)

diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
index db7ba2feb947..0648190467ba 100644
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -6,8 +6,10 @@ unsigned long kaslr_get_random_long(const char *purpose);
 
 #ifdef CONFIG_RANDOMIZE_MEMORY
 void kernel_randomize_memory(void);
+void init_trampoline_kaslr(void);
 #else
 static inline void kernel_randomize_memory(void) { }
+static inline void init_trampoline_kaslr(void) {}
 #endif /* CONFIG_RANDOMIZE_MEMORY */
 
 #endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index d24f8e1f7250..6366136b0e46 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1070,27 +1070,14 @@ void init_mem_mapping(void);
 void early_alloc_pgt_buf(void);
 extern void memblock_find_dma_reserve(void);
 
+
 #ifdef CONFIG_X86_64
-/* Realmode trampoline initialization. */
 extern pgd_t trampoline_pgd_entry;
-static inline void __meminit init_trampoline_default(void)
-{
-	/* Default trampoline pgd value */
-	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
-}
 
 void __init poking_init(void);
 
 unsigned long init_memory_mapping(unsigned long start,
 				  unsigned long end, pgprot_t prot);
-
-# ifdef CONFIG_RANDOMIZE_MEMORY
-void __meminit init_trampoline(void);
-# else
-# define init_trampoline init_trampoline_default
-# endif
-#else
-static inline void init_trampoline(void) { }
 #endif
 
 /* local pte updates need not use xchg for locking */
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ed8ec011a9fd..d95cacf210bb 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -78,6 +78,15 @@ static inline bool kaslr_enabled(void)
 	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
 }
 
+/*
+ * Apply no randomization if KASLR was disabled at boot or if KASAN
+ * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
+ */
+static inline bool kaslr_memory_enabled(void)
+{
+	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
+}
+
 static inline unsigned long kaslr_offset(void)
 {
 	return (unsigned long)&_text - __START_KERNEL;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 235dd0e35741..e225ebb25197 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -682,6 +682,28 @@ static void __init memory_map_bottom_up(unsigned long map_start,
 	}
 }
 
+/*
+ * The real mode trampoline, which is required for bootstrapping CPUs
+ * occupies only a small area under the low 1MB. See reserve_real_mode()
+ * for details.
+ *
+ * If KASLR is disabled the first PGD entry of the direct mapping is copied
+ * to map the real mode trampoline.
+ *
+ * If KASLR is enabled, copy only the PUD which covers the low 1MB
+ * area. This limits the randomization granularity to 1GB for both 4-level
+ * and 5-level paging.
+ */
+static void __init init_trampoline(void)
+{
+#ifdef CONFIG_X86_64
+	if (!kaslr_memory_enabled())
+		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
+	else
+		init_trampoline_kaslr();
+#endif
+}
+
 void __init init_mem_mapping(void)
 {
 	unsigned long end;
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index faf02e1e1517..fb620fd9dae9 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -61,15 +61,6 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
 	return (region->size_tb << TB_SHIFT);
 }
 
-/*
- * Apply no randomization if KASLR was disabled at boot or if KASAN
- * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
- */
-static inline bool kaslr_memory_enabled(void)
-{
-	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
-}
-
 /* Initialize base and padding for each memory region randomized with KASLR */
 void __init kernel_randomize_memory(void)
 {
@@ -148,7 +139,7 @@ void __init kernel_randomize_memory(void)
 	}
 }
 
-static void __meminit init_trampoline_pud(void)
+void __meminit init_trampoline_kaslr(void)
 {
 	pud_t *pud_page_tramp, *pud, *pud_tramp;
 	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
@@ -189,25 +180,3 @@ static void __meminit init_trampoline_pud(void)
 			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
 	}
 }
-
-/*
- * The real mode trampoline, which is required for bootstrapping CPUs
- * occupies only a small area under the low 1MB. See reserve_real_mode()
- * for details.
- *
- * If KASLR is disabled the first PGD entry of the direct mapping is copied
- * to map the real mode trampoline.
- *
- * If KASLR is enabled, copy only the PUD which covers the low 1MB
- * area. This limits the randomization granularity to 1GB for both 4-level
- * and 5-level paging.
- */
-void __meminit init_trampoline(void)
-{
-	if (!kaslr_memory_enabled()) {
-		init_trampoline_default();
-		return;
-	}
-
-	init_trampoline_pud();
-}
-- 
2.26.2