From: Minchan Kim <minchan@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Michael Kerrisk <mtk.manpages@gmail.com>,
	linux-api@vger.kernel.org, Hugh Dickins <hughd@google.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Rik van Riel <riel@redhat.com>, Mel Gorman <mgorman@suse.de>,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Jason Evans <je@fb.com>, Daniel Micay <danielmicay@gmail.com>,
	"Kirill A. Shutemov" <kirill@shutemov.name>,
	Shaohua Li <shli@kernel.org>, Michal Hocko <mhocko@suse.cz>,
	yalin.wang2010@gmail.com, Minchan Kim <minchan@kernel.org>,
	Andrea Arcangeli <aarcange@redhat.com>
Subject: [PATCH v3 13/17] mm: don't split THP page when MADV_FREE syscall is called
Date: Thu, 12 Nov 2015 13:33:09 +0900	[thread overview]
Message-ID: <1447302793-5376-14-git-send-email-minchan@kernel.org> (raw)
In-Reply-To: <1447302793-5376-1-git-send-email-minchan@kernel.org>

We don't need to split a THP page when the MADV_FREE syscall is
called: the split can be deferred until the VM decides to free the
page in the reclaim path under heavy memory pressure, which avoids
unnecessary THP splits.

To that end, this patch changes two things:

1. __split_huge_page_map

It applies pte_mkdirty to the subpages only if the pmd was dirty
(i.e. pmd_dirty returns true).

2. __split_huge_page_refcount

It no longer marks the subpages PG_dirty unconditionally.
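
For illustration, here is a minimal userspace sketch of the call
pattern this patch optimizes. The 2MB HPAGE_SIZE and the MADV_FREE
value are illustrative assumptions of the sketch (the value follows
patch 03/17 of this series), not part of this patch; error handling
is omitted:

  #include <stdint.h>
  #include <string.h>
  #include <sys/mman.h>

  #ifndef MADV_FREE
  #define MADV_FREE 8			/* assumed value, see patch 03/17 */
  #endif

  #define HPAGE_SIZE	(2UL << 20)	/* assumes 2MB THP */

  int main(void)
  {
  	/* over-allocate so a THP-aligned, THP-sized chunk exists */
  	char *raw = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
  			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  	char *p = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) &
  			   ~(HPAGE_SIZE - 1));

  	memset(p, 1, HPAGE_SIZE);	/* fault in one huge page */

  	/*
  	 * Before this patch, the huge page backing [p, p + 2M) was
  	 * split right here; with it, the pmd is merely cleaned and
  	 * the split is deferred until reclaim runs under pressure.
  	 */
  	madvise(p, HPAGE_SIZE, MADV_FREE);
  	return 0;
  }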

Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/huge_mm.h |  3 +++
 mm/huge_memory.c        | 51 +++++++++++++++++++++++++++++++++++++++++++++++----
 mm/madvise.c            | 17 ++++++++++++++++-
 3 files changed, 66 insertions(+), 5 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ecb080d6ff42..e9db238a75c1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 					  unsigned long addr,
 					  pmd_t *pmd,
 					  unsigned int flags);
+extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
+			struct vm_area_struct *vma,
+			pmd_t *pmd, unsigned long addr);
 extern int zap_huge_pmd(struct mmu_gather *tlb,
 			struct vm_area_struct *vma,
 			pmd_t *pmd, unsigned long addr);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bbac913f96bc..b8c9b44af864 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1453,6 +1453,45 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 0;
 }
 
+int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long addr)
+{
+	spinlock_t *ptl;
+	pmd_t orig_pmd;
+	struct page *page;
+	struct mm_struct *mm = tlb->mm;
+
+	if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
+		return 1;
+
+	orig_pmd = *pmd;
+	if (is_huge_zero_pmd(orig_pmd))
+		goto out;
+
+	page = pmd_page(orig_pmd);
+	if (PageActive(page))
+		deactivate_page(page);
+
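+	/*
+	 * Mark the huge page old and clean so reclaim can lazily
+	 * discard it instead of swapping it out; a later store
+	 * re-dirties the pmd and cancels the lazy free.
+	 */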
+	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
+		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
+			tlb->fullmm);
+		orig_pmd = pmd_mkold(orig_pmd);
+		orig_pmd = pmd_mkclean(orig_pmd);
+
+		set_pmd_at(mm, addr, pmd, orig_pmd);
+		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+	}
+out:
+	spin_unlock(ptl);
+
+	return 0;
+}
+
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		 pmd_t *pmd, unsigned long addr)
 {
@@ -1752,8 +1791,8 @@ static void __split_huge_page_refcount(struct page *page,
 				      (1L << PG_mlocked) |
 				      (1L << PG_uptodate) |
 				      (1L << PG_active) |
-				      (1L << PG_unevictable)));
-		page_tail->flags |= (1L << PG_dirty);
+				      (1L << PG_unevictable) |
+				      (1L << PG_dirty)));
 
 		/* clear PageTail before overwriting first_page */
 		smp_wmb();
@@ -1787,7 +1826,6 @@ static void __split_huge_page_refcount(struct page *page,
 
 		BUG_ON(!PageAnon(page_tail));
 		BUG_ON(!PageUptodate(page_tail));
-		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
 		lru_add_page_tail(page, page_tail, lruvec, list);
@@ -1831,10 +1869,12 @@ static int __split_huge_page_map(struct page *page,
 	int ret = 0, i;
 	pgtable_t pgtable;
 	unsigned long haddr;
+	bool dirty;
 
 	pmd = page_check_address_pmd(page, mm, address,
 			PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
 	if (pmd) {
+		dirty = pmd_dirty(*pmd);
 		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 		pmd_populate(mm, &_pmd, pgtable);
 		if (pmd_write(*pmd))
@@ -1850,7 +1890,10 @@ static int __split_huge_page_map(struct page *page,
 			 * permissions across VMAs.
 			 */
 			entry = mk_pte(page + i, vma->vm_page_prot);
-			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
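+			/* carry the dirty bit over only from a dirty pmd */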
+			if (dirty)
+				entry = pte_mkdirty(entry);
+			entry = maybe_mkwrite(entry, vma);
 			if (!pmd_write(*pmd))
 				entry = pte_wrprotect(entry);
 			if (!pmd_young(*pmd))
diff --git a/mm/madvise.c b/mm/madvise.c
index 4e67ba0b1104..27ed057c0bd7 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -271,8 +271,22 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	pte_t *pte, ptent;
 	struct page *page;
 	int nr_swap = 0;
+	unsigned long next;
+
+	next = pmd_addr_end(addr, end);
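+	/*
+	 * A huge pmd covered only partially by [addr, next) must be
+	 * split and handled by the pte loop below; a fully covered
+	 * one can be lazily freed in place without splitting.
+	 */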
+	if (pmd_trans_huge(*pmd)) {
+		if (next - addr != HPAGE_PMD_SIZE)
+			split_huge_page_pmd(vma, addr, pmd);
+		else if (!madvise_free_huge_pmd(tlb, vma, pmd, addr))
+			goto next;
+		/* fall through */
+	}
 
-	split_huge_page_pmd(vma, addr, pmd);
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
@@ -355,6 +369,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+next:
 	return 0;
 }
 
-- 
1.9.1
