From: Minchan Kim <minchan@kernel.org>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Mel Gorman <mgorman@suse.de>, Hugh Dickins <hughd@google.com>,
Dave Hansen <dave.hansen@intel.com>,
Rik van Riel <riel@redhat.com>,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Michel Lespinasse <walken@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
John Stultz <john.stultz@linaro.org>,
Dhaval Giani <dhaval.giani@gmail.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Android Kernel Team <kernel-team@android.com>,
Robert Love <rlove@google.com>, Mel Gorman <mel@csn.ul.ie>,
Dmitry Adamushko <dmitry.adamushko@gmail.com>,
Dave Chinner <david@fromorbit.com>, Neil Brown <neilb@suse.de>,
Andrea Righi <andrea@betterlinux.com>,
Andrea Arcangeli <aarcange@redhat.com>,
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
Mike Hommey <mh@glandium.org>, Taras Glek <tglek@mozilla.com>,
Jan Kara <jack@suse.cz>,
KOSAKI Motohiro <kosaki.motohiro@gmail.com>,
Rob Clark <robdclark@gmail.com>, Jason Evans <je@fb.com>,
Minchan Kim <minchan@kernel.org>
Subject: [PATCH v10 15/16] vrange: Prevent unnecessary scanning
Date: Thu, 2 Jan 2014 16:12:23 +0900
Message-ID: <1388646744-15608-16-git-send-email-minchan@kernel.org>
In-Reply-To: <1388646744-15608-1-git-send-email-minchan@kernel.org>
Currently we scan and discard volatile pages according to vrange size,
but vrange size is a virtual address range, so we have no idea how much
RSS actually backs it. If a range is large but holds little RSS, the
reclaim path can do excessive scanning and burn CPU. Another problem is
that every iteration restarts from the vrange's starting address, even
though many of those pages were already purged in the previous pass,
which again wastes CPU.

This patch keeps the previous scan address in the vrange's hint
variable so we can avoid unnecessary scanning on the next round.
Moreover, once every page in the range has been purged, we can skip
the vrange entirely.
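
To illustrate the encoding, here is a minimal userspace sketch of how a
single hint word can share the purged flag (bit 0) with the page-aligned
cached scan address. The C11 atomics and the hard-coded 4K page size are
stand-ins for the kernel's cmpxchg() and PAGE_MASK, and the names only
mirror the patch for readability:

    #include <stdatomic.h>
    #include <stdio.h>

    #define PAGE_MASK (~0xfffUL)    /* assumed 4K pages */
    #define PURGED_BIT 0x1UL        /* VRANGE_PURGED_MARK analogue */

    static _Atomic unsigned long hint; /* bit 0: purged, rest: cached addr */

    /* Replace the cached address while preserving the purged bit. */
    static void record_scan_addr(unsigned long addr)
    {
        unsigned long old = atomic_load(&hint), new;

        do {
            new = (old & ~PAGE_MASK) | (addr & PAGE_MASK);
        } while (!atomic_compare_exchange_weak(&hint, &old, new));
    }

    static unsigned long load_scan_addr(void)
    {
        return atomic_load(&hint) & PAGE_MASK;
    }

    int main(void)
    {
        atomic_fetch_or(&hint, PURGED_BIT);  /* mark_purge() analogue */
        record_scan_addr(0x3000);
        printf("cached=%#lx purged=%lu\n",
               load_scan_addr(), atomic_load(&hint) & PURGED_BIT);
        return 0;
    }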
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Stultz <john.stultz@linaro.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
mm/vrange.c | 107 ++++++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 91 insertions(+), 16 deletions(-)
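
For reference, the scan-budget flow added to shrink_vrange() below boils
down to the following toy model: each range remembers where the previous
scan stopped, and a partially scanned range is requeued at the LRU tail.
This is illustrative only; sizes are in pages, list handling is elided,
and the helper names are invented for the sketch:

    #include <stdio.h>

    struct range { unsigned long len, next; }; /* next: resume offset */

    /* Scan up to 'budget' pages, resuming where the last scan stopped. */
    static unsigned long scan_range(struct range *r, unsigned long budget)
    {
        unsigned long todo = r->len - r->next;
        unsigned long done = todo < budget ? todo : budget;

        r->next += done; /* record_scan_addr() analogue */
        return done;
    }

    int main(void)
    {
        struct range ranges[] = { { 8, 0 }, { 3, 0 } };
        unsigned long budget = 6; /* VRANGE_SCAN_THRESHOLD analogue */

        for (int i = 0; i < 2 && budget; i++) {
            budget -= scan_range(&ranges[i], budget);
            if (ranges[i].next < ranges[i].len)
                printf("range %d partially scanned, requeue at tail\n", i);
        }
        printf("range 0 resumes at %lu, budget left %lu\n",
               ranges[0].next, budget);
        return 0;
    }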
diff --git a/mm/vrange.c b/mm/vrange.c
index df01c6b084bf..6cdbf6feed26 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -31,6 +31,11 @@ struct vrange_walker {
#define VRANGE_PURGED_MARK 0
+/*
+ * [mark|clear]_purge could invalidate the cached address, but that is
+ * rare and, in the worst case, some address range gets rescanned or
+ * skipped, so it is not critical from an integrity point of view.
+ */
void mark_purge(struct vrange *range)
{
range->hint |= (1 << VRANGE_PURGED_MARK);
@@ -47,9 +52,36 @@ bool vrange_purged(struct vrange *range)
return purged;
}
-static inline unsigned long vrange_size(struct vrange *range)
+void record_scan_addr(struct vrange *range, unsigned long addr)
{
- return range->node.last + 1 - range->node.start;
+ unsigned long old, new, ret;
+
+ BUG_ON(addr & ~PAGE_MASK);
+
+ /*
+ * The hint variable is shared between the cached address and the
+ * purged flag. The purged flag is modified under vrange_lock, but
+ * the cached address is updated locklessly, so a racing do_purge
+ * could otherwise lose the purged flag, which would be critical.
+ * The cmpxchg loop prevents that.
+ */
+ do {
+ old = range->hint;
+ new = (old & ~PAGE_MASK) | addr; /* keep flag, replace address */
+ ret = cmpxchg(&range->hint, old, new);
+ } while (ret != old);
+
+ BUG_ON(addr && addr > range->node.last + 1);
+ BUG_ON(addr && addr < range->node.start);
+}
+
+unsigned long load_scan_addr(struct vrange *range)
+{
+ unsigned long cached_addr = range->hint & PAGE_MASK;
+ BUG_ON(cached_addr && cached_addr > range->node.last + 1);
+ BUG_ON(cached_addr && cached_addr < range->node.start);
+
+ return cached_addr;
}
static void vroot_ctor(void *data)
@@ -259,6 +291,14 @@ static inline void __vrange_lru_add(struct vrange *range)
spin_unlock(&vrange_list.lock);
}
+static inline void __vrange_lru_add_tail(struct vrange *range)
+{
+ spin_lock(&vrange_list.lock);
+ WARN_ON(!list_empty(&range->lru));
+ list_add_tail(&range->lru, &vrange_list.list);
+ spin_unlock(&vrange_list.lock);
+}
+
static inline void __vrange_lru_del(struct vrange *range)
{
spin_lock(&vrange_list.lock);
@@ -306,6 +346,9 @@ static inline void __vrange_set(struct vrange *range,
{
range->node.start = start_idx;
range->node.last = end_idx;
+
+ /* If a resize happens, invalidate the cached address */
+ range->hint = 0;
if (purged)
mark_purge(range);
else
@@ -1069,12 +1112,13 @@ static unsigned long discard_vma_pages(struct mm_struct *mm,
* so avoid touching vrange->owner.
*/
static int __discard_vrange_anon(struct mm_struct *mm, struct vrange *vrange,
- unsigned long *ret_discard)
+ unsigned long *ret_discard, unsigned long *scan)
{
struct vm_area_struct *vma;
unsigned long nr_discard = 0;
unsigned long start = vrange->node.start;
unsigned long end = vrange->node.last + 1;
+ unsigned long cached_addr;
int ret = 0;
/* Prevent the vmas from being destroyed while the process exits */
@@ -1087,6 +1131,10 @@ static int __discard_vrange_anon(struct mm_struct *mm, struct vrange *vrange,
goto out; /* this vrange could be retried */
}
+ cached_addr = load_scan_addr(vrange);
+ if (cached_addr)
+ start = cached_addr;
+
vma = find_vma(mm, start);
if (!vma || (vma->vm_start >= end))
goto out_unlock;
@@ -1097,10 +1145,18 @@ static int __discard_vrange_anon(struct mm_struct *mm, struct vrange *vrange,
BUG_ON(vma->vm_flags & (VM_SPECIAL|VM_LOCKED|VM_MIXEDMAP|
VM_HUGETLB));
cond_resched();
- nr_discard += discard_vma_pages(mm, vma,
- max_t(unsigned long, start, vma->vm_start),
- min_t(unsigned long, end, vma->vm_end));
+
+ start = max(start, vma->vm_start);
+ end = min(end, vma->vm_end);
+ end = min(start + *scan, end);
+
+ nr_discard += discard_vma_pages(mm, vma, start, end);
+ *scan -= (end - start);
+ if (!*scan)
+ break;
}
+
+ record_scan_addr(vrange, end);
out_unlock:
up_read(&mm->mmap_sem);
mmput(mm);
@@ -1110,18 +1166,27 @@ out:
}
static int __discard_vrange_file(struct address_space *mapping,
- struct vrange *vrange, unsigned long *ret_discard)
+ struct vrange *vrange, unsigned long *ret_discard,
+ unsigned long *scan)
{
struct pagevec pvec;
pgoff_t index;
int i, ret = 0;
+ unsigned long cached_addr;
unsigned long nr_discard = 0;
unsigned long start_idx = vrange->node.start;
unsigned long end_idx = vrange->node.last;
const pgoff_t start = start_idx >> PAGE_CACHE_SHIFT;
- pgoff_t end = end_idx >> PAGE_CACHE_SHIFT;
+ pgoff_t end;
LIST_HEAD(pagelist);
+ cached_addr = load_scan_addr(vrange);
+ if (cached_addr)
+ start_idx = cached_addr;
+
+ end_idx = min(start_idx + *scan, end_idx);
+ end = end_idx >> PAGE_CACHE_SHIFT;
+
pagevec_init(&pvec, 0);
index = start;
while (index <= end && pagevec_lookup(&pvec, mapping, index,
@@ -1141,16 +1206,20 @@ static int __discard_vrange_file(struct address_space *mapping,
index++;
}
+ *scan -= (end_idx + 1 - start_idx);
+
if (!list_empty(&pagelist))
nr_discard = discard_vrange_pagelist(&pagelist);
+ record_scan_addr(vrange, end_idx + 1);
*ret_discard = nr_discard;
putback_lru_pages(&pagelist);
return ret;
}
-static int discard_vrange(struct vrange *vrange, unsigned long *nr_discard)
+static int discard_vrange(struct vrange *vrange, unsigned long *nr_discard,
+ unsigned long *scan)
{
int ret = 0;
struct vrange_root *vroot;
@@ -1169,10 +1238,10 @@ static int discard_vrange(struct vrange *vrange, unsigned long *nr_discard)
if (vroot->type == VRANGE_MM) {
struct mm_struct *mm = vroot->object;
- ret = __discard_vrange_anon(mm, vrange, nr_discard);
+ ret = __discard_vrange_anon(mm, vrange, nr_discard, scan);
} else if (vroot->type == VRANGE_FILE) {
struct address_space *mapping = vroot->object;
- ret = __discard_vrange_file(mapping, vrange, nr_discard);
+ ret = __discard_vrange_file(mapping, vrange, nr_discard, scan);
}
out:
@@ -1188,7 +1257,7 @@ unsigned long shrink_vrange(enum lru_list lru, struct lruvec *lruvec,
int retry = 10;
struct vrange *range;
unsigned long nr_to_reclaim, total_reclaimed = 0;
- unsigned long long scan_threshold = VRANGE_SCAN_THRESHOLD;
+ unsigned long remained_scan = VRANGE_SCAN_THRESHOLD;
if (!(sc->gfp_mask & __GFP_IO))
return 0;
@@ -1209,7 +1278,7 @@ unsigned long shrink_vrange(enum lru_list lru, struct lruvec *lruvec,
nr_to_reclaim = sc->nr_to_reclaim;
- while (nr_to_reclaim > 0 && scan_threshold > 0 && retry) {
+ while (nr_to_reclaim > 0 && remained_scan > 0 && retry) {
unsigned long nr_reclaimed = 0;
int ret;
@@ -1224,9 +1293,7 @@ unsigned long shrink_vrange(enum lru_list lru, struct lruvec *lruvec,
continue;
}
- ret = discard_vrange(range, &nr_reclaimed);
- scan_threshold -= vrange_size(range);
-
+ ret = discard_vrange(range, &nr_reclaimed, &remained_scan);
/* If it's EAGAIN, retry it after a little */
if (ret == -EAGAIN) {
retry--;
@@ -1235,6 +1302,14 @@ unsigned long shrink_vrange(enum lru_list lru, struct lruvec *lruvec,
continue;
}
+ if (load_scan_addr(range) < range->node.last) {
+ /*
+ * For fairness, we prefer purging a range fully rather
+ * than purging all ranges partially.
+ */
+ __vrange_lru_add_tail(range);
+ }
+
__vrange_put(range);
retry = 10;
--
1.7.9.5