From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Hugh Dickins <hughd@google.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>,
Vlastimil Babka <vbabka@suse.cz>,
Christoph Lameter <cl@gentwo.org>,
Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
Jerome Marchand <jmarchan@redhat.com>,
Yang Shi <yang.shi@linaro.org>,
Sasha Levin <sasha.levin@oracle.com>,
Andres Lagar-Cavilla <andreslc@google.com>,
Ning Qu <quning@gmail.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-fsdevel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv6 06/30] mm: introduce do_set_pmd()
Date: Thu, 7 Apr 2016 01:50:56 +0300
Message-ID: <1459983080-106718-7-git-send-email-kirill.shutemov@linux.intel.com>
In-Reply-To: <1459983080-106718-1-git-send-email-kirill.shutemov@linux.intel.com>

With postponed page table allocation we have a chance to set up huge pages.
alloc_set_pte() calls do_set_pmd() if the following criteria are met:

 - the page is compound;
 - the pmd entry is pmd_none();
 - the vma has suitable size and alignment (see the sketch below).
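
To make the alignment criterion concrete: a compound page can be mapped with
a PMD only if the huge page would start on a PMD boundary in the process
address space, i.e. the VMA's start address and its file offset are congruent
modulo HPAGE_PMD_NR base pages, and the whole huge page fits inside the VMA.
The stand-alone userspace sketch below illustrates that check; the constants
are illustrative x86-64 values and the helper suitable() is hypothetical, it
only mirrors the transhuge_vma_suitable() helper added by this patch:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative x86-64 values: 4K base pages, 2M huge pages. */
#define PAGE_SHIFT		12
#define HPAGE_PMD_NR		512
#define HPAGE_PMD_SIZE		((unsigned long)HPAGE_PMD_NR << PAGE_SHIFT)
#define HPAGE_CACHE_INDEX_MASK	(HPAGE_PMD_NR - 1)

/* Hypothetical stand-alone version of the transhuge_vma_suitable() check. */
static bool suitable(unsigned long vm_start, unsigned long vm_end,
		unsigned long vm_pgoff, unsigned long haddr)
{
	/* start address and file offset must be congruent modulo 2M */
	if (((vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vm_pgoff & HPAGE_CACHE_INDEX_MASK))
		return false;
	/* the whole huge page must lie inside the VMA */
	return haddr >= vm_start && haddr + HPAGE_PMD_SIZE <= vm_end;
}

int main(void)
{
	/* 2M-aligned mapping of file offset 0: prints 1, PMD mapping possible */
	printf("%d\n", suitable(0x600000, 0xa00000, 0, 0x600000));
	/* file offset shifted by one base page: prints 0, fall back to PTEs */
	printf("%d\n", suitable(0x600000, 0xa00000, 1, 0x600000));
	return 0;
}
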
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/huge_mm.h |  2 ++
 mm/huge_memory.c        |  8 ------
 mm/memory.c             | 72 ++++++++++++++++++++++++++++++++++++++++++++++++-
 mm/migrate.c            |  3 +--
 4 files changed, 74 insertions(+), 11 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cad4ca270fda..8a0da3317402 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -144,6 +144,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
 
 struct page *get_huge_zero_page(void);
 
+#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6d447ca95efa..8c76f12ea097 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -793,14 +793,6 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 	return pmd;
 }
 
-static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
-{
-	pmd_t entry;
-	entry = mk_pmd(page, prot);
-	entry = pmd_mkhuge(entry);
-	return entry;
-}
-
 static inline struct list_head *page_deferred_list(struct page *page)
 {
 	/*
diff --git a/mm/memory.c b/mm/memory.c
index 095b210cafe9..f9054d5f7775 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2866,6 +2866,66 @@ map_pte:
 	return 0;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+		unsigned long haddr)
+{
+	if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
+			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+		return false;
+	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+		return false;
+	return true;
+}
+
+static int do_set_pmd(struct fault_env *fe, struct page *page)
+{
+	struct vm_area_struct *vma = fe->vma;
+	bool write = fe->flags & FAULT_FLAG_WRITE;
+	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	pmd_t entry;
+	int i, ret;
+
+	if (!transhuge_vma_suitable(vma, haddr))
+		return VM_FAULT_FALLBACK;
+
+	ret = VM_FAULT_FALLBACK;
+	page = compound_head(page);
+
+	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
+	if (unlikely(!pmd_none(*fe->pmd)))
+		goto out;
+
+	for (i = 0; i < HPAGE_PMD_NR; i++)
+		flush_icache_page(vma, page + i);
+
+	entry = mk_huge_pmd(page, vma->vm_page_prot);
+	if (write)
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+
+	add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
+	page_add_file_rmap(page, true);
+
+	set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
+
+	update_mmu_cache_pmd(vma, haddr, fe->pmd);
+
+	/* fault is handled */
+	ret = 0;
+out:
+	spin_unlock(fe->ptl);
+	return ret;
+}
+#else
+static int do_set_pmd(struct fault_env *fe, struct page *page)
+{
+	BUILD_BUG();
+	return 0;
+}
+#endif
+
 /**
  * alloc_set_pte - setup new PTE entry for given page and add reverse page
  * mapping. If needed, the function allocates page table or uses a pre-allocated one.
@@ -2885,9 +2945,19 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
 	struct vm_area_struct *vma = fe->vma;
 	bool write = fe->flags & FAULT_FLAG_WRITE;
 	pte_t entry;
+	int ret;
+
+	if (pmd_none(*fe->pmd) && PageTransCompound(page)) {
+		/* THP on COW? */
+		VM_BUG_ON_PAGE(memcg, page);
+
+		ret = do_set_pmd(fe, page);
+		if (ret != VM_FAULT_FALLBACK)
+			return ret;
+	}
 
 	if (!fe->pte) {
-		int ret = pte_alloc_one_map(fe);
+		ret = pte_alloc_one_map(fe);
 		if (ret)
 			return ret;
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index d20276fffce7..5c9cd90334ea 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1820,8 +1820,7 @@ fail_putback:
 	}
 
 	orig_entry = *pmd;
-	entry = mk_pmd(new_page, vma->vm_page_prot);
-	entry = pmd_mkhuge(entry);
+	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	/*
--
2.8.0.rc3