From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Hugh Dickins <hughd@google.com>,
David Rientjes <rientjes@google.com>,
LKML <linux-kernel@vger.kernel.org>,
Linux Memory Management List <linux-mm@kvack.org>
Subject: [PATCH 09/12] thp: introduce khugepaged_prealloc_page and khugepaged_alloc_page
Date: Mon, 13 Aug 2012 19:16:28 +0800 [thread overview]
Message-ID: <5028E20C.3080607@linux.vnet.ibm.com> (raw)
In-Reply-To: <5028E12C.70101@linux.vnet.ibm.com>
These helpers abstract the difference between the NUMA-enabled and NUMA-disabled
cases, making the code more readable.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
mm/huge_memory.c | 166 ++++++++++++++++++++++++++++++++----------------------
1 file changed, 98 insertions(+), 68 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 050b8d0..82f6cce 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1833,28 +1833,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
}
}
-static void collapse_huge_page(struct mm_struct *mm,
- unsigned long address,
- struct page **hpage,
- struct vm_area_struct *vma,
- int node)
+static void khugepaged_alloc_sleep(void)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd, _pmd;
- pte_t *pte;
- pgtable_t pgtable;
- struct page *new_page;
- spinlock_t *ptl;
- int isolated;
- unsigned long hstart, hend;
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
- up_read(&mm->mmap_sem);
- VM_BUG_ON(!*hpage);
- new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+ if (IS_ERR(*hpage)) {
+ if (!*wait)
+ return false;
+
+ *wait = false;
+ khugepaged_alloc_sleep();
+ } else if (*hpage) {
+ put_page(*hpage);
+ *hpage = NULL;
+ }
+
+ return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int node)
+{
VM_BUG_ON(*hpage);
/*
* Allocate the page while the vma is still valid and under
@@ -1866,7 +1872,7 @@ static void collapse_huge_page(struct mm_struct *mm,
* mmap_sem in read mode is good idea also to allow greater
* scalability.
*/
- new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+ *hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
node, __GFP_OTHER_NODE);
/*
@@ -1874,15 +1880,81 @@ static void collapse_huge_page(struct mm_struct *mm,
* preparation for taking it in write mode.
*/
up_read(&mm->mmap_sem);
- if (unlikely(!new_page)) {
+ if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
- return;
+ return NULL;
}
- *hpage = new_page;
+
count_vm_event(THP_COLLAPSE_ALLOC);
+ return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+ struct page *hpage;
+
+ do {
+ hpage = alloc_hugepage(khugepaged_defrag());
+ if (!hpage) {
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ if (!*wait)
+ return NULL;
+
+ *wait = false;
+ khugepaged_alloc_sleep();
+ } else
+ count_vm_event(THP_COLLAPSE_ALLOC);
+ } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+ return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+ if (!*hpage)
+ *hpage = khugepaged_alloc_hugepage(wait);
+
+ if (unlikely(!*hpage))
+ return false;
+
+ return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int node)
+{
+ up_read(&mm->mmap_sem);
+ VM_BUG_ON(!*hpage);
+ return *hpage;
+}
#endif
+static void collapse_huge_page(struct mm_struct *mm,
+ unsigned long address,
+ struct page **hpage,
+ struct vm_area_struct *vma,
+ int node)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd, _pmd;
+ pte_t *pte;
+ pgtable_t pgtable;
+ struct page *new_page;
+ spinlock_t *ptl;
+ int isolated;
+ unsigned long hstart, hend;
+
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+ /* release the mmap_sem read lock. */
+ new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+ if (!new_page)
+ return;
+
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
return;
@@ -2230,34 +2302,6 @@ static int khugepaged_wait_event(void)
kthread_should_stop();
}
-static void khugepaged_alloc_sleep(void)
-{
- wait_event_freezable_timeout(khugepaged_wait, false,
- msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
-
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
- struct page *hpage;
-
- do {
- hpage = alloc_hugepage(khugepaged_defrag());
- if (!hpage) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- if (!*wait)
- return NULL;
-
- *wait = false;
- khugepaged_alloc_sleep();
- } else
- count_vm_event(THP_COLLAPSE_ALLOC);
- } while (unlikely(!hpage) && likely(khugepaged_enabled()));
-
- return hpage;
-}
-#endif
-
static void khugepaged_do_scan(void)
{
struct page *hpage = NULL;
@@ -2268,23 +2312,9 @@ static void khugepaged_do_scan(void)
barrier(); /* write khugepaged_pages_to_scan to local stack */
while (progress < pages) {
-#ifndef CONFIG_NUMA
- if (!hpage)
- hpage = khugepaged_alloc_hugepage(&wait);
-
- if (unlikely(!hpage))
+ if (!khugepaged_prealloc_page(&hpage, &wait))
break;
-#else
- if (IS_ERR(hpage)) {
- if (!wait)
- break;
- wait = false;
- khugepaged_alloc_sleep();
- } else if (hpage) {
- put_page(hpage);
- hpage = NULL;
- }
-#endif
+
cond_resched();
if (unlikely(kthread_should_stop() || freezing(current)))
--
1.7.7.6
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
next prev parent reply other threads:[~2012-08-13 11:16 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-08-13 11:12 [PATCH 00/12] thp: optimize use of khugepaged_mutex and dependence of CONFIG_NUMA Xiao Guangrong
2012-08-13 11:13 ` [PATCH 01/12] thp: fix the count of THP_COLLAPSE_ALLOC Xiao Guangrong
2012-08-13 11:19 ` Kirill A. Shutemov
2012-08-13 11:13 ` [PATCH 02/12] thp: remove unnecessary check in start_khugepaged Xiao Guangrong
2012-08-13 11:14 ` [PATCH 03/12] thp: move khugepaged_mutex out of khugepaged Xiao Guangrong
2012-08-13 11:14 ` [PATCH 04/12] thp: remove unnecessary khugepaged_thread check Xiao Guangrong
2012-08-13 11:14 ` [PATCH 05/12] thp: remove wake_up_interruptible in the exit path Xiao Guangrong
2012-08-13 11:15 ` [PATCH 06/12] thp: remove some code depend on CONFIG_NUMA Xiao Guangrong
2012-08-13 11:15 ` [PATCH 07/12] thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan Xiao Guangrong
2012-08-13 11:16 ` [PATCH 08/12] thp: release page in page pre-alloc path Xiao Guangrong
2012-08-13 11:16 ` Xiao Guangrong [this message]
2012-09-12 2:03 ` [PATCH 09/12] thp: introduce khugepaged_prealloc_page and khugepaged_alloc_page Hugh Dickins
2012-09-12 2:35 ` Xiao Guangrong
2012-09-12 3:37 ` Xiao Guangrong
2012-09-13 6:27 ` Hugh Dickins
2012-09-13 6:33 ` Hugh Dickins
2012-09-13 9:26 ` Xiao Guangrong
2012-08-13 11:16 ` [PATCH 10/12] thp: remove khugepaged_loop Xiao Guangrong
2012-08-13 11:17 ` [PATCH 11/12] thp: use khugepaged_enabled to remove duplicate code Xiao Guangrong
2012-08-13 11:17 ` [PATCH 12/12] thp: remove unnecessary set_recommended_min_free_kbytes Xiao Guangrong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5028E20C.3080607@linux.vnet.ibm.com \
--to=xiaoguangrong@linux.vnet.ibm.com \
--cc=aarcange@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=hughd@google.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=rientjes@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).