From: Michel Lespinasse <walken@google.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Rik van Riel <riel@redhat.com>, Hugh Dickins <hughd@google.com>,
linux-kernel@vger.kernel.org,
Russell King <linux@arm.linux.org.uk>,
Ralf Baechle <ralf@linux-mips.org>,
Paul Mundt <lethal@linux-sh.org>,
"David S. Miller" <davem@davemloft.net>,
Chris Metcalf <cmetcalf@tilera.com>,
x86@kernel.org, William Irwin <wli@holomorphy.com>
Cc: linux-mm@kvack.org, linux-arm-kernel@lists.infradead.org,
linux-mips@linux-mips.org, linux-sh@vger.kernel.org,
sparclinux@vger.kernel.org
Subject: [PATCH 06/16] mm: use vm_unmapped_area() on x86_64 architecture
Date: Mon, 5 Nov 2012 14:47:03 -0800 [thread overview]
Message-ID: <1352155633-8648-7-git-send-email-walken@google.com> (raw)
In-Reply-To: <1352155633-8648-1-git-send-email-walken@google.com>
Update the x86_64 arch_get_unmapped_area[_topdown] functions to make
use of vm_unmapped_area() instead of implementing a brute force search.
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
---
arch/x86/include/asm/elf.h | 6 +-
arch/x86/kernel/sys_x86_64.c | 151 ++++++++---------------------------------
arch/x86/vdso/vma.c | 2 +-
3 files changed, 33 insertions(+), 126 deletions(-)
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 5939f44fe0c0..9c999c1674fa 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -354,12 +354,10 @@ static inline int mmap_is_ia32(void)
return 0;
}
-/* The first two values are special, do not change. See align_addr() */
+/* Do not change the values. See get_align_mask() */
enum align_flags {
ALIGN_VA_32 = BIT(0),
ALIGN_VA_64 = BIT(1),
- ALIGN_VDSO = BIT(2),
- ALIGN_TOPDOWN = BIT(3),
};
struct va_alignment {
@@ -368,5 +366,5 @@ struct va_alignment {
} ____cacheline_aligned;
extern struct va_alignment va_align;
-extern unsigned long align_addr(unsigned long, struct file *, enum align_flags);
+extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index b4d3c3927dd8..f00d006d60fd 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -21,37 +21,23 @@
/*
* Align a virtual address to avoid aliasing in the I$ on AMD F15h.
- *
- * @flags denotes the allocation direction - bottomup or topdown -
- * or vDSO; see call sites below.
*/
-unsigned long align_addr(unsigned long addr, struct file *filp,
- enum align_flags flags)
+static unsigned long get_align_mask(void)
{
- unsigned long tmp_addr;
-
/* handle 32- and 64-bit case with a single conditional */
if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
- return addr;
+ return 0;
if (!(current->flags & PF_RANDOMIZE))
- return addr;
-
- if (!((flags & ALIGN_VDSO) || filp))
- return addr;
-
- tmp_addr = addr;
-
- /*
- * We need an address which is <= than the original
- * one only when in topdown direction.
- */
- if (!(flags & ALIGN_TOPDOWN))
- tmp_addr += va_align.mask;
+ return 0;
- tmp_addr &= ~va_align.mask;
+ return va_align.mask;
+}
- return tmp_addr;
+unsigned long align_vdso_addr(unsigned long addr)
+{
+ unsigned long align_mask = get_align_mask();
+ return (addr + align_mask) & ~align_mask;
}
static int __init control_va_addr_alignment(char *str)
@@ -126,7 +112,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long start_addr;
+ struct vm_unmapped_area_info info;
unsigned long begin, end;
if (flags & MAP_FIXED)
@@ -144,50 +130,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vma || addr + len <= vma->vm_start))
return addr;
}
- if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
- && len <= mm->cached_hole_size) {
- mm->cached_hole_size = 0;
- mm->free_area_cache = begin;
- }
- addr = mm->free_area_cache;
- if (addr < begin)
- addr = begin;
- start_addr = addr;
-
-full_search:
-
- addr = align_addr(addr, filp, 0);
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (end - len < addr) {
- /*
- * Start a new search - just in case we missed
- * some holes.
- */
- if (start_addr != begin) {
- start_addr = addr = begin;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Remember the place where we stopped the search:
- */
- mm->free_area_cache = addr + len;
- return addr;
- }
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = vma->vm_end;
- addr = align_addr(addr, filp, 0);
- }
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = begin;
+ info.high_limit = end;
+ info.align_mask = filp ? get_align_mask() : 0;
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
-
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
@@ -195,7 +147,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
- unsigned long addr = addr0, start_addr;
+ unsigned long addr = addr0;
+ struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
@@ -217,51 +170,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
- /* check if free_area_cache is useful for us */
- if (len <= mm->cached_hole_size) {
- mm->cached_hole_size = 0;
- mm->free_area_cache = mm->mmap_base;
- }
-
-try_again:
- /* either no address requested or can't fit in requested address hole */
- start_addr = addr = mm->free_area_cache;
-
- if (addr < len)
- goto fail;
-
- addr -= len;
- do {
- addr = align_addr(addr, filp, ALIGN_TOPDOWN);
-
- /*
- * Lookup failure means no vma is above this address,
- * else if new region fits below vma->vm_start,
- * return with success:
- */
- vma = find_vma(mm, addr);
- if (!vma || addr+len <= vma->vm_start)
- /* remember the address as a hint for next time */
- return mm->free_area_cache = addr;
-
- /* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (len < vma->vm_start);
-
-fail:
- /*
- * if hint left us with no space for the requested
- * mapping then try again:
- */
- if (start_addr != mm->mmap_base) {
- mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = 0;
- goto try_again;
- }
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ info.align_mask = filp ? get_align_mask() : 0;
+ info.align_offset = 0;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+ VM_BUG_ON(addr != -ENOMEM);
bottomup:
/*
@@ -270,14 +188,5 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
- mm->cached_hole_size = ~0UL;
- mm->free_area_cache = TASK_UNMAPPED_BASE;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
- mm->free_area_cache = mm->mmap_base;
- mm->cached_hole_size = ~0UL;
-
- return addr;
+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 00aaf047b39f..431e87544411 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -141,7 +141,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
* unaligned here as a result of stack start randomization.
*/
addr = PAGE_ALIGN(addr);
- addr = align_addr(addr, NULL, ALIGN_VDSO);
+ addr = align_vdso_addr(addr);
return addr;
}
--
1.7.7.3
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2012-11-05 22:47 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-11-05 22:46 [PATCH 00/16] mm: use augmented rbtrees for finding unmapped areas Michel Lespinasse
2012-11-05 22:46 ` [PATCH 01/16] mm: add anon_vma_lock to validate_mm() Michel Lespinasse
2012-11-05 23:06 ` Rik van Riel
2012-11-06 8:00 ` Michel Lespinasse
2012-11-05 22:46 ` [PATCH 02/16] mm: augment vma rbtree with rb_subtree_gap Michel Lespinasse
2012-11-05 22:47 ` [PATCH 03/16] mm: check rb_subtree_gap correctness Michel Lespinasse
2012-11-06 22:38 ` Andrew Morton
2012-11-09 14:13 ` Sasha Levin
2012-11-09 20:06 ` Hugh Dickins
2012-11-12 11:55 ` Michel Lespinasse
2012-11-05 22:47 ` [PATCH 04/16] mm: rearrange vm_area_struct for fewer cache misses Michel Lespinasse
2012-11-05 22:47 ` [PATCH 05/16] mm: vm_unmapped_area() lookup function Michel Lespinasse
2012-11-05 22:47 ` Michel Lespinasse [this message]
2012-11-05 22:47 ` [PATCH 07/16] mm: fix cache coloring on x86_64 architecture Michel Lespinasse
2012-11-06 22:38 ` Andrew Morton
2012-11-05 22:47 ` [PATCH 08/16] mm: use vm_unmapped_area() in hugetlbfs Michel Lespinasse
2012-11-05 23:33 ` Rik van Riel
2012-11-05 22:47 ` [PATCH 09/16] mm: use vm_unmapped_area() in hugetlbfs on i386 architecture Michel Lespinasse
2012-11-05 23:07 ` Rik van Riel
2012-11-06 22:38 ` Andrew Morton
2012-11-06 22:48 ` Rik van Riel
2012-11-05 22:47 ` [PATCH 10/16] mm: use vm_unmapped_area() on mips architecture Michel Lespinasse
2012-11-05 23:34 ` Rik van Riel
2012-11-06 22:38 ` Andrew Morton
2012-11-05 22:47 ` [PATCH 11/16] mm: use vm_unmapped_area() on arm architecture Michel Lespinasse
2012-11-05 23:35 ` Rik van Riel
2012-11-06 22:38 ` Andrew Morton
2012-11-05 22:47 ` [PATCH 12/16] mm: use vm_unmapped_area() on sh architecture Michel Lespinasse
2012-11-05 23:35 ` Rik van Riel
2012-11-05 22:47 ` [PATCH 13/16] mm: use vm_unmapped_area() on sparc64 architecture Michel Lespinasse
2012-11-05 23:36 ` Rik van Riel
2012-11-05 22:47 ` [PATCH 14/16] mm: use vm_unmapped_area() in hugetlbfs " Michel Lespinasse
2012-11-05 23:36 ` Rik van Riel
2012-11-05 22:47 ` [PATCH 15/16] mm: use vm_unmapped_area() on sparc32 architecture Michel Lespinasse
2012-11-05 23:37 ` Rik van Riel
2012-11-06 1:25 ` David Miller
2012-11-06 3:13 ` Michel Lespinasse
2012-11-06 7:30 ` Rik van Riel
2012-11-06 17:41 ` David Miller
2012-11-05 22:47 ` [PATCH 16/16] mm: use vm_unmapped_area() in hugetlbfs on tile architecture Michel Lespinasse
2012-11-05 23:37 ` Rik van Riel
2012-11-06 22:11 ` [PATCH 00/16] mm: use augmented rbtrees for finding unmapped areas Andrew Morton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1352155633-8648-7-git-send-email-walken@google.com \
--to=walken@google.com \
--cc=akpm@linux-foundation.org \
--cc=cmetcalf@tilera.com \
--cc=davem@davemloft.net \
--cc=hughd@google.com \
--cc=lethal@linux-sh.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mips@linux-mips.org \
--cc=linux-mm@kvack.org \
--cc=linux-sh@vger.kernel.org \
--cc=linux@arm.linux.org.uk \
--cc=ralf@linux-mips.org \
--cc=riel@redhat.com \
--cc=sparclinux@vger.kernel.org \
--cc=wli@holomorphy.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).