From: aarcange@redhat.com
To: linux-mm@kvack.org, akpm@linux-foundation.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
	Izik Eidus <ieidus@redhat.com>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
	Mel Gorman <mel@csn.ul.ie>, Dave Hansen <dave@linux.vnet.ibm.com>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Christoph Lameter <cl@linux-foundation.org>,
	Chris Wright <chrisw@sous-sol.org>,
	bpicco@redhat.com,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Andrea Arcangeli <aarcange@redhat.com>
Subject: [patch 23/35] clear_copy_huge_page
Date: Tue, 09 Mar 2010 20:39:24 +0100
Message-ID: <20100309194315.339598368@redhat.com>
In-Reply-To: <20100309193901.207868642@redhat.com>

From: Andrea Arcangeli <aarcange@redhat.com>

Move the clear_huge_page() and copy_huge_page() functions from
mm/hugetlb.c to common code in mm/memory.c, so that they can be shared
between hugetlb.c and the huge_memory.c introduced later in this
series. The helpers now take an explicit pages_per_huge_page argument
instead of deriving the page count from the hugetlbfs hstate (or from a
size in bytes), so callers that have no hstate can use them as well.
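
As a minimal sketch of the intended new users, this is roughly how a
transparent hugepage fault path might call the relocated helpers. The
caller below is hypothetical: HPAGE_PMD_NR (the THP pages-per-huge-page
count) is only introduced later in this series, and haddr, new_page and
old_page stand in for the real fault-handler locals:

	/*
	 * Sketch only, not part of this patch: copy an existing huge
	 * page into a freshly allocated one on a THP write fault ...
	 */
	copy_huge_page(new_page, old_page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	/* ... or zero a newly faulted anonymous huge page. */
	clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);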

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
---
 include/linux/mm.h |    9 ++++++
 mm/hugetlb.c       |   69 ++--------------------------------------------------
 mm/memory.c        |   70 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 82 insertions(+), 66 deletions(-)

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1498,5 +1498,14 @@ extern void shake_page(struct page *p, i
 extern atomic_long_t mce_bad_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+extern void clear_huge_page(struct page *page,
+			    unsigned long addr,
+			    unsigned int pages_per_huge_page);
+extern void copy_huge_page(struct page *dst, struct page *src,
+			   unsigned long addr, struct vm_area_struct *vma,
+			   unsigned int pages_per_huge_page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -385,70 +385,6 @@ static int vma_has_reserves(struct vm_ar
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-	struct page *p = page;
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
-	}
-}
-static void clear_huge_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-
-	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, sz);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++) {
-		cond_resched();
-		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-	}
-}
-
-static void copy_gigantic_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-static void copy_huge_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src, addr, vma);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-	}
-}
-
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -2333,7 +2269,8 @@ retry_avoidcopy:
 		return -PTR_ERR(new_page);
 	}
 
-	copy_huge_page(new_page, old_page, address, vma);
+	copy_huge_page(new_page, old_page, address, vma,
+		       pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
 	/*
@@ -2429,7 +2366,7 @@ retry:
 			ret = -PTR_ERR(page);
 			goto out;
 		}
-		clear_huge_page(page, address, huge_page_size(h));
+		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
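
The hugetlb call sites change in lockstep with the new prototype: where
they previously passed a size in bytes (or relied on the hstate inside
the helper), they now pass an explicit page count. Side by side, taken
from the hunks above:

	/* before */
	clear_huge_page(page, address, huge_page_size(h));
	/* after */
	clear_huge_page(page, address, pages_per_huge_page(h));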
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3501,3 +3501,73 @@ void might_fault(void)
 }
 EXPORT_SYMBOL(might_fault);
 #endif
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+static void clear_gigantic_page(struct page *page,
+				unsigned long addr,
+				unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *p = page;
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page;
+	     i++, p = mem_map_next(p, page, i)) {
+		cond_resched();
+		clear_user_highpage(p, addr + i * PAGE_SIZE);
+	}
+}
+void clear_huge_page(struct page *page,
+		     unsigned long addr, unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		clear_gigantic_page(page, addr, pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+	}
+}
+
+static void copy_gigantic_page(struct page *dst, struct page *src,
+			       unsigned long addr,
+			       struct vm_area_struct *vma,
+			       unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; ) {
+		cond_resched();
+		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+void copy_huge_page(struct page *dst, struct page *src,
+		    unsigned long addr, struct vm_area_struct *vma,
+		    unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		copy_gigantic_page(dst, src, addr, vma, pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE,
+				   vma);
+	}
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
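
A note on why the *_gigantic_page() variants exist at all: for huge
pages of order >= MAX_ORDER the underlying struct page array is not
guaranteed to be virtually contiguous (e.g. with SPARSEMEM without
VMEMMAP), so plain 'page + i' pointer arithmetic is only valid within a
MAX_ORDER-sized block; the gigantic variants therefore step through the
mem_map with mem_map_next() instead. For reference, mem_map_next() was
defined in mm/internal.h at the time roughly as follows (reproduced
here for context, not part of this patch):

	static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
	{
		/* Recompute the pfn on each MAX_ORDER boundary ... */
		if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
			unsigned long pfn = page_to_pfn(base) + offset;
			if (!pfn_valid(pfn))
				return NULL;
			return pfn_to_page(pfn);
		}
		/* ... otherwise the next struct page is adjacent. */
		return iter + 1;
	}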


Thread overview: 38+ messages
2010-03-09 19:39 [patch 00/35] Transparent Hugepage support #13 aarcange
2010-03-09 19:39 ` [patch 01/35] define MADV_HUGEPAGE aarcange
2010-03-09 19:39 ` [patch 02/35] compound_lock aarcange
2010-03-09 19:39 ` [patch 03/35] alter compound get_page/put_page aarcange
2010-03-09 19:39 ` [patch 04/35] update futex compound knowledge aarcange
2010-03-09 19:39 ` [patch 05/35] fix bad_page to show the real reason the page is bad aarcange
2010-03-09 19:39 ` [patch 06/35] clear compound mapping aarcange
2010-03-09 19:39 ` [patch 07/35] add native_set_pmd_at aarcange
2010-03-09 19:39 ` [patch 08/35] add pmd paravirt ops aarcange
2010-03-09 19:39 ` [patch 09/35] no paravirt version of pmd ops aarcange
2010-03-09 19:39 ` [patch 10/35] export maybe_mkwrite aarcange
2010-03-09 19:39 ` [patch 11/35] comment reminder in destroy_compound_page aarcange
2010-03-09 19:39 ` [patch 12/35] config_transparent_hugepage aarcange
2010-03-09 19:39 ` [patch 13/35] special pmd_trans_* functions aarcange
2010-03-09 19:39 ` [patch 14/35] add pmd mangling generic functions aarcange
2010-03-09 19:39 ` [patch 15/35] add pmd mangling functions to x86 aarcange
2010-03-09 19:39 ` [patch 16/35] bail out gup_fast on splitting pmd aarcange
2010-03-09 19:39 ` [patch 17/35] pte alloc trans splitting aarcange
2010-03-09 19:39 ` [patch 18/35] add pmd mmu_notifier helpers aarcange
2010-03-09 19:39 ` [patch 19/35] clear page compound aarcange
2010-03-09 19:39 ` [patch 20/35] add pmd_huge_pte to mm_struct aarcange
2010-03-09 19:39 ` [patch 21/35] split_huge_page_mm/vma aarcange
2010-03-09 19:39 ` [patch 22/35] split_huge_page paging aarcange
2010-03-09 19:39 ` [patch 23/35] clear_copy_huge_page aarcange  [this message]
2010-03-09 19:39 ` [patch 24/35] kvm mmu transparent hugepage support aarcange
2010-03-09 19:39 ` [patch 25/35] _GFP_NO_KSWAPD aarcange
2010-03-09 19:39 ` [patch 26/35] dont alloc harder for gfp nomemalloc even if nowait aarcange
2010-03-09 19:39 ` [patch 27/35] transparent hugepage core aarcange
2010-03-09 19:39 ` [patch 28/35] adapt to mm_counter in -mm aarcange
2010-03-09 19:39 ` [patch 29/35] verify pmd_trans_huge isnt leaking aarcange
2010-03-09 19:39 ` [patch 30/35] madvise(MADV_HUGEPAGE) aarcange
2010-03-09 19:39 ` [patch 31/35] pmd_trans_huge migrate bugcheck aarcange
2010-03-09 19:39 ` [patch 32/35] memcg compound aarcange
2010-03-09 19:39 ` [patch 33/35] memcg huge memory aarcange
2010-03-09 19:39 ` [patch 34/35] transparent hugepage vmstat aarcange
2010-03-09 19:39 ` [patch 35/35] khugepaged aarcange
2010-03-11  0:55 ` [patch 00/35] Transparent Hugepage support #13 Andrea Arcangeli
  Earlier postings of this series, matched on Subject:
2010-02-26 20:04 [patch 00/35] Transparent Hugepage support #12 aarcange
2010-02-26 20:04 ` [patch 23/35] clear_copy_huge_page aarcange
