From mboxrd@z Thu Jan 1 00:00:00 1970
From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Subject: [PATCH v4 2/2] powerpc/mm: Tracking vDSO remap
Date: Thu, 26 Mar 2015 18:37:53 +0100
Message-ID: <7fdae652993cf88bdd633d65e5a8f81c7ad8a1e3.1427390952.git.ldufour@linux.vnet.ibm.com>
References: <20150326141730.GA23060@gmail.com>
Sender: owner-linux-mm@kvack.org
List-Id: linux-arch.vger.kernel.org
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, Jeff Dike,
	Richard Weinberger, Guan Xuetao, Thomas Gleixner, Ingo Molnar,
	"H. Peter Anvin", x86@kernel.org, Arnd Bergmann,
	linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org,
	linux-s390@vger.kernel.org, user-mode-linux-devel@lists.sourceforge.net,
	user-mode-linux-user@lists.sourceforge.net, linux-arch@vger.kernel.org,
	linux-mm@kvack.org
Cc: cov@codeaurora.org, criu@openvz.org

Some processes (CRIU) move the vDSO area using the mremap system call.
As a consequence, the kernel's reference to the vDSO base address is no
longer valid, and the signal return frame built once the vDSO has been
moved does not point to the new sigreturn address.

This patch handles vDSO remapping and unmapping. Moving or partially
unmapping the vDSO invalidates it from the kernel's point of view.
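For illustration, the problematic sequence looks roughly like this from
user space (a minimal sketch, not CRIU's actual code; the destination
address is arbitrary):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		unsigned long start = 0, end = 0;
		char line[256];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f)
			return 1;
		/* Find the vDSO mapping of the current process. */
		while (fgets(line, sizeof(line), f)) {
			if (strstr(line, "[vdso]")) {
				sscanf(line, "%lx-%lx", &start, &end);
				break;
			}
		}
		fclose(f);
		if (!start)
			return 1;

		/* Move the whole vDSO elsewhere. Without this patch the
		 * kernel's mm->context.vdso_base still points to the old
		 * location, so the next signal frame is built with a
		 * stale sigreturn address.
		 */
		if (mremap((void *)start, end - start, end - start,
			   MREMAP_MAYMOVE | MREMAP_FIXED,
			   (void *)(start + 0x1000000)) == MAP_FAILED) {
			perror("mremap");
			return 1;
		}
		return 0;
	}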
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/mmu_context.h | 32 +++++++++++++++++++++++++++-
 arch/powerpc/kernel/vdso.c             | 39 +++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 73382eba02dc..67734ce8be67 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -8,7 +8,6 @@
 #include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputhreads.h>
 
 /*
@@ -109,5 +108,36 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+extern void arch_vdso_remap(struct mm_struct *mm,
+			    unsigned long old_start, unsigned long old_end,
+			    unsigned long new_start, unsigned long new_end);
+static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end)
+{
+	arch_vdso_remap(mm, start, end, 0, 0);
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
+#define __HAVE_ARCH_REMAP
+static inline void arch_remap(struct mm_struct *mm,
+			      unsigned long old_start, unsigned long old_end,
+			      unsigned long new_start, unsigned long new_end)
+{
+	arch_vdso_remap(mm, old_start, old_end, new_start, new_end);
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 305eb0d9b768..a11b5d8f36d6 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -283,6 +283,45 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	return rc;
 }
 
+void arch_vdso_remap(struct mm_struct *mm,
+		     unsigned long old_start, unsigned long old_end,
+		     unsigned long new_start, unsigned long new_end)
+{
+	unsigned long vdso_end, vdso_start;
+
+	if (!mm->context.vdso_base)
+		return;
+	vdso_start = mm->context.vdso_base;
+
+#ifdef CONFIG_PPC64
+	/* Calling is_32bit_task() implies that we are dealing with the
+	 * current process memory. If there is a call path where mm is not
+	 * owned by the current task, then we'll need to store the
+	 * vDSO size in the mm->context.
+	 */
+	BUG_ON(current->mm != mm);
+	if (is_32bit_task())
+		vdso_end = vdso_start + (vdso32_pages << PAGE_SHIFT);
+	else
+		vdso_end = vdso_start + (vdso64_pages << PAGE_SHIFT);
+#else
+	vdso_end = vdso_start + (vdso32_pages << PAGE_SHIFT);
+#endif
+	vdso_end += (1 << PAGE_SHIFT); /* data page */
+
+	/* Check whether the remapped or unmapped range overlaps the
+	 * vDSO area.
+	 */
+	if (old_start < vdso_end && old_end > vdso_start) {
+		/* Keep tracking the vDSO only if it is moved as a whole */
+		if (old_start == vdso_start && old_end == vdso_end &&
+		    (new_end - new_start) == (old_end - old_start))
+			mm->context.vdso_base = new_start;
+		else
+			mm->context.vdso_base = 0;
+	}
+}
+
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
 	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
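As a side note, the tracking policy above can be exercised in isolation
from user space. The sketch below mirrors the logic of arch_vdso_remap()
in plain C; vdso_remap_check() is an illustrative stand-in, not kernel
code, returning the resulting vdso_base value:

	#include <assert.h>

	/* Mirror of the policy: a whole-range, same-size move keeps the
	 * vDSO valid at its new address; any partial move or unmap
	 * invalidates it; an unrelated range leaves it untouched.
	 */
	static unsigned long vdso_remap_check(unsigned long vdso_start,
					      unsigned long vdso_end,
					      unsigned long old_start,
					      unsigned long old_end,
					      unsigned long new_start,
					      unsigned long new_end)
	{
		/* No overlap: base stays where it is. */
		if (old_end <= vdso_start || old_start >= vdso_end)
			return vdso_start;
		/* Entire vDSO moved, size preserved: track the new base. */
		if (old_start == vdso_start && old_end == vdso_end &&
		    (new_end - new_start) == (old_end - old_start))
			return new_start;
		/* Partial move or unmap: invalidate. */
		return 0;
	}

	int main(void)
	{
		/* Whole vDSO (2 pages at 0x1000) moved to 0x10000. */
		assert(vdso_remap_check(0x1000, 0x3000, 0x1000, 0x3000,
					0x10000, 0x12000) == 0x10000);
		/* First page unmapped: the vDSO becomes invalid. */
		assert(vdso_remap_check(0x1000, 0x3000, 0x1000, 0x2000,
					0, 0) == 0);
		/* Unrelated unmap leaves the base untouched. */
		assert(vdso_remap_check(0x1000, 0x3000, 0x8000, 0x9000,
					0, 0) == 0x1000);
		return 0;
	}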
--
1.9.1