From: Minchan Kim <minchan@kernel.org>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Mel Gorman <mgorman@suse.de>, Hugh Dickins <hughd@google.com>,
Dave Hansen <dave.hansen@intel.com>,
Rik van Riel <riel@redhat.com>,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Michel Lespinasse <walken@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
John Stultz <john.stultz@linaro.org>,
Dhaval Giani <dhaval.giani@gmail.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Android Kernel Team <kernel-team@android.com>,
Robert Love <rlove@google.com>, Mel Gorman <mel@csn.ul.ie>,
Dmitry Adamushko <dmitry.adamushko@gmail.com>,
Dave Chinner <david@fromorbit.com>, Neil Brown <neilb@suse.de>,
Andrea Righi <andrea@betterlinux.com>,
Andrea Arcangeli <aarcange@redhat.com>,
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
Mike Hommey <mh@glandium.org>, Taras Glek <tglek@mozilla.com>,
Jan Kara <jack@suse.cz>,
KOSAKI Motohiro <kosaki.motohiro@gmail.com>,
Rob Clark <robdclark@gmail.com>, Jason Evans <je@fb.com>,
Minchan Kim <minchan@kernel.org>
Subject: [PATCH v10 10/16] vrange: Purging vrange-anon pages from shrinker
Date: Thu, 2 Jan 2014 16:12:18 +0900 [thread overview]
Message-ID: <1388646744-15608-11-git-send-email-minchan@kernel.org> (raw)
In-Reply-To: <1388646744-15608-1-git-send-email-minchan@kernel.org>
This patch provides the logic to discard anonymous vranges by
generating the page list for the volatile ranges, setting the ptes
volatile, and discarding the pages.
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
[jstultz: Code tweaks and commit log rewording]
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
mm/vrange.c | 184 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 183 insertions(+), 1 deletion(-)
diff --git a/mm/vrange.c b/mm/vrange.c
index 4a52b7a05f9a..0fa669c56ab8 100644
--- a/mm/vrange.c
+++ b/mm/vrange.c
@@ -11,6 +11,8 @@
#include <linux/hugetlb.h>
#include "internal.h"
#include <linux/mmu_notifier.h>
+#include <linux/mm_inline.h>
+#include <linux/migrate.h>
static struct kmem_cache *vrange_cachep;
@@ -19,6 +21,11 @@ static struct vrange_list {
spinlock_t lock;
} vrange_list;
+struct vrange_walker {
+ struct vm_area_struct *vma;
+ struct list_head *pagelist;
+};
+
static inline unsigned long vrange_size(struct vrange *range)
{
return range->node.last + 1 - range->node.start;
@@ -682,11 +689,186 @@ static struct vrange *vrange_isolate(void)
return vrange;
}
-static int discard_vrange(struct vrange *vrange, unsigned long *nr_discard)
+/*
+ * Try to discard every isolated page on @page_list.
+ *
+ * Pages that cannot be trylocked, or that discard_vpage() rejects, are
+ * collected on a local list and spliced back onto @page_list so the
+ * caller can return them to the LRU.  Successfully discarded pages are
+ * freed in one batch via free_hot_cold_page_list().
+ *
+ * Returns the number of pages actually discarded.
+ */
+static unsigned long discard_vrange_pagelist(struct list_head *page_list)
+{
+ struct page *page;
+ unsigned int nr_discard = 0;
+ LIST_HEAD(ret_pages);
+ LIST_HEAD(free_pages);
+
+ while (!list_empty(page_list)) {
+ int err;
+ page = list_entry(page_list->prev, struct page, lru);
+ list_del(&page->lru);
+ if (!trylock_page(page)) {
+ list_add(&page->lru, &ret_pages);
+ continue;
+ }
+
+ /*
+ * On success discard_vpage() returns with the page already
+ * unlocked, so we only unlock here on the failure path.
+ */
+ err = discard_vpage(page);
+ if (err) {
+ unlock_page(page);
+ list_add(&page->lru, &ret_pages);
+ continue;
+ }
+
+ /* Page is gone from the LRU for good: drop the isolated count */
+ ClearPageActive(page);
+ list_add(&page->lru, &free_pages);
+ dec_zone_page_state(page, NR_ISOLATED_ANON);
+ nr_discard++;
+ }
+
+ free_hot_cold_page_list(&free_pages, 1);
+ list_splice(&ret_pages, page_list);
+ return nr_discard;
+}
+
+/*
+ * Examine one pte: if it maps a present, normal, unlocked LRU page,
+ * isolate the page from the LRU onto vw->pagelist for later discard.
+ *
+ * NOTE(review): @ptent_size is unused here; callers pass PAGE_SIZE.
+ */
+static void vrange_pte_entry(pte_t pteval, unsigned long address,
+ unsigned ptent_size, struct mm_walk *walk)
{
+ struct page *page;
+ struct vrange_walker *vw = walk->private;
+ struct vm_area_struct *vma = vw->vma;
+ struct list_head *pagelist = vw->pagelist;
+
+ if (pte_none(pteval))
+ return;
+
+ /* Swapped-out or otherwise non-present entries are skipped */
+ if (!pte_present(pteval))
+ return;
+
+ page = vm_normal_page(vma, address, pteval);
+ if (unlikely(!page))
+ return;
+
+ if (!PageLRU(page) || PageLocked(page))
+ return;
+
+ /* THPs were split in vrange_pte_range(), so none should appear */
+ BUG_ON(PageCompound(page));
+
+ if (isolate_lru_page(page))
+ return;
+
+ list_add(&page->lru, pagelist);
+
+ /* Only anon pages are expected on volatile anon ranges */
+ VM_BUG_ON(page_is_file_cache(page));
+ inc_zone_page_state(page, NR_ISOLATED_ANON);
+}
+
+/*
+ * pmd_entry callback for walk_page_range(): split any THP covering the
+ * range, then scan each pte under the page-table lock, handing every
+ * entry to vrange_pte_entry() for isolation.
+ */
+static int vrange_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct vrange_walker *vw = walk->private;
+ struct vm_area_struct *uninitialized_var(vma);
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ /*
+ * NOTE(review): uninitialized_var() looks unnecessary -- vma is
+ * assigned unconditionally right here.
+ */
+ vma = vw->vma;
+ split_huge_page_pmd(vma, addr, pmd);
+ /* pmd may be changing under us (THP); skip the range if so */
+ if (pmd_trans_unstable(pmd))
+ return 0;
+
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE)
+ vrange_pte_entry(*pte, addr, PAGE_SIZE, walk);
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+
 return 0;
}
+/*
+ * Walk [start, end) of @vma, isolate its resident anonymous pages onto
+ * a local list, attempt to discard them, and put any survivors back on
+ * the LRU.  Returns the number of pages discarded.
+ *
+ * NOTE(review): @mm is unused -- the walk takes vma->vm_mm directly.
+ * NOTE(review): the walk_page_range() return value is ignored here.
+ */
+static unsigned long discard_vma_pages(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long ret = 0;
+ LIST_HEAD(pagelist);
+ struct vrange_walker vw;
+ struct mm_walk vrange_walk = {
+ .pmd_entry = vrange_pte_range,
+ .mm = vma->vm_mm,
+ .private = &vw,
+ };
+
+ vw.pagelist = &pagelist;
+ vw.vma = vma;
+
+ walk_page_range(start, end, &vrange_walk);
+
+ if (!list_empty(&pagelist))
+ ret = discard_vrange_pagelist(&pagelist);
+
+ /* Anything still on the list could not be discarded: re-add to LRU */
+ putback_lru_pages(&pagelist);
+ return ret;
+}
+
+/*
+ * vrange->owner isn't stable because caller doesn't hold vrange_lock
+ * so avoid touching vrange->owner.
+ */
+/*
+ * Walk every VMA intersecting [vrange->node.start, vrange->node.last]
+ * in @mm and discard its resident anonymous pages.
+ *
+ * Returns 0 on success (including when the mm is already being torn
+ * down), or -EAGAIN when mmap_sem could not be taken so the vrange
+ * should be retried.  *ret_discard is always set and reports the
+ * number of pages discarded.
+ */
+static int __discard_vrange_anon(struct mm_struct *mm, struct vrange *vrange,
+ unsigned long *ret_discard)
+{
+ struct vm_area_struct *vma;
+ unsigned long nr_discard = 0;
+ unsigned long start = vrange->node.start;
+ unsigned long end = vrange->node.last + 1;
+ int ret = 0;
+
+ /*
+ * Initialize the out-parameter up front so the early-return paths
+ * below (dead mm, contended mmap_sem) cannot leave the caller
+ * reading an uninitialized count.
+ */
+ *ret_discard = 0;
+
+ /* Pin mm_users so the vmas aren't destroyed if the process exits */
+ if (!atomic_inc_not_zero(&mm->mm_users))
+ return ret;
+
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ mmput(mm);
+ ret = -EAGAIN;
+ goto out; /* this vrange could be retried */
+ }
+
+ vma = find_vma(mm, start);
+ if (!vma || (vma->vm_start >= end))
+ goto out_unlock;
+
+ for (; vma; vma = vma->vm_next) {
+ if (vma->vm_start >= end)
+ break;
+ /*
+ * Presumably vrange setup refuses these vma types, so hitting
+ * one here is a logic error -- TODO confirm against vrange(2).
+ */
+ BUG_ON(vma->vm_flags & (VM_SPECIAL|VM_LOCKED|VM_MIXEDMAP|
+ VM_HUGETLB));
+ cond_resched();
+ nr_discard += discard_vma_pages(mm, vma,
+ max_t(unsigned long, start, vma->vm_start),
+ min_t(unsigned long, end, vma->vm_end));
+ }
+out_unlock:
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ *ret_discard = nr_discard;
+out:
+ return ret;
+}
+
+/*
+ * Discard one isolated vrange.  Only anonymous ranges (VRANGE_MM) are
+ * handled for now; other range types are silently skipped.
+ *
+ * Returns 0, or a negative error from __discard_vrange_anon()
+ * (e.g. -EAGAIN when mmap_sem was contended and a retry is wanted).
+ */
+static int discard_vrange(struct vrange *vrange, unsigned long *nr_discard)
+{
+ int ret = 0;
+ struct mm_struct *mm;
+ struct vrange_root *vroot;
+ vroot = vrange->owner;
+
+ /* TODO : handle VRANGE_FILE */
+ if (vroot->type != VRANGE_MM)
+ goto out;
+
+ /* For VRANGE_MM, vroot->object holds the owning mm_struct */
+ mm = vroot->object;
+ ret = __discard_vrange_anon(mm, vrange, nr_discard);
+out:
+ return ret;
+}
+
+
#define VRANGE_SCAN_THRESHOLD (4 << 20)
unsigned long shrink_vrange(enum lru_list lru, struct lruvec *lruvec,
--
1.7.9.5
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
next prev parent reply other threads:[~2014-01-02 7:13 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-01-02 7:12 [PATCH v10 00/16] Volatile Ranges v10 Minchan Kim
2014-01-02 7:12 ` [PATCH v10 01/16] vrange: Add vrange support to mm_structs Minchan Kim
2014-01-02 7:12 ` [PATCH v10 02/16] vrange: Clear volatility on new mmaps Minchan Kim
2014-01-02 7:12 ` [PATCH v10 03/16] vrange: Add support for volatile ranges on file mappings Minchan Kim
2014-01-02 7:12 ` [PATCH v10 04/16] vrange: Add new vrange(2) system call Minchan Kim
2014-01-02 7:12 ` [PATCH v10 05/16] vrange: Add basic functions to purge volatile pages Minchan Kim
2014-01-02 7:12 ` [PATCH v10 06/16] vrange: introduce fake VM_VRANGE flag Minchan Kim
2014-01-02 7:12 ` [PATCH v10 07/16] vrange: Purge volatile pages when memory is tight Minchan Kim
2014-01-02 7:12 ` [PATCH v10 08/16] vrange: Send SIGBUS when user try to access purged page Minchan Kim
2014-01-02 7:12 ` [PATCH v10 09/16] vrange: Add core shrinking logic for swapless system Minchan Kim
2014-01-02 7:12 ` Minchan Kim [this message]
2014-01-02 7:12 ` [PATCH v10 11/16] vrange: support shmem_purge_page Minchan Kim
2014-01-02 7:12 ` [PATCH v10 12/16] vrange: Support background purging for vrange-file Minchan Kim
2014-01-02 7:12 ` [PATCH v10 13/16] vrange: Allocate vroot dynamically Minchan Kim
2014-01-02 7:12 ` [PATCH v10 14/16] vrange: Change purged with hint Minchan Kim
2014-01-02 7:12 ` [PATCH v10 15/16] vrange: Prevent unnecessary scanning Minchan Kim
2014-01-02 7:12 ` [PATCH v10 16/16] vrange: Add vmstat counter about purged page Minchan Kim
2014-01-27 22:23 ` [PATCH v10 00/16] Volatile Ranges v10 KOSAKI Motohiro
2014-01-27 22:43 ` John Stultz
2014-01-28 0:12 ` Minchan Kim
2014-01-28 0:42 ` John Stultz
2014-01-28 1:02 ` Minchan Kim
2014-01-28 1:09 ` Taras Glek
2014-01-28 1:23 ` Minchan Kim
2014-01-29 0:03 ` Johannes Weiner
2014-01-29 1:43 ` John Stultz
2014-01-29 18:30 ` Johannes Weiner
2014-01-31 1:27 ` John Stultz
2014-01-31 1:44 ` Jason Evans
2014-02-04 1:31 ` Minchan Kim
2014-02-04 3:08 ` Jason Evans
2014-02-04 4:58 ` Minchan Kim
2014-02-04 15:25 ` Dave Hansen
2014-01-31 6:15 ` Johannes Weiner
2014-01-29 5:11 ` Minchan Kim
2014-01-31 16:49 ` Johannes Weiner
2014-02-03 14:58 ` Jan Kara
2014-02-03 18:36 ` Johannes Weiner
2014-02-04 1:09 ` Minchan Kim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1388646744-15608-11-git-send-email-minchan@kernel.org \
--to=minchan@kernel.org \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=andrea@betterlinux.com \
--cc=aneesh.kumar@linux.vnet.ibm.com \
--cc=dave.hansen@intel.com \
--cc=david@fromorbit.com \
--cc=dhaval.giani@gmail.com \
--cc=dmitry.adamushko@gmail.com \
--cc=hannes@cmpxchg.org \
--cc=hpa@zytor.com \
--cc=hughd@google.com \
--cc=jack@suse.cz \
--cc=je@fb.com \
--cc=john.stultz@linaro.org \
--cc=kernel-team@android.com \
--cc=kosaki.motohiro@gmail.com \
--cc=kosaki.motohiro@jp.fujitsu.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mel@csn.ul.ie \
--cc=mgorman@suse.de \
--cc=mh@glandium.org \
--cc=neilb@suse.de \
--cc=riel@redhat.com \
--cc=rlove@google.com \
--cc=robdclark@gmail.com \
--cc=tglek@mozilla.com \
--cc=walken@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).