From: Dan Williams <dan.j.williams@intel.com>
To: akpm@linux-foundation.org
Cc: Andrea Arcangeli <aarcange@redhat.com>,
Dave Hansen <dave@sr71.net>,
linux-nvdimm@lists.01.org, Peter Zijlstra <peterz@infradead.org>,
linux-mm@kvack.org, Mel Gorman <mgorman@suse.de>,
Sasha Levin <sasha.levin@oracle.com>,
Matthew Wilcox <willy@linux.intel.com>,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [-mm PATCH v5 15/18] mm, dax: dax-pmd vs thp-pmd vs hugetlbfs-pmd
Date: Thu, 24 Dec 2015 16:59:28 -0800
Message-ID: <20151225005623.19962.1972.stgit@dwillia2-desk3.jf.intel.com>
In-Reply-To: <20151221054526.34542.25205.stgit@dwillia2-desk3.jf.intel.com>
A dax-huge-page mapping, while it uses some thp helpers, is ultimately not a
transparent huge page.  The distinction is especially important in the
get_user_pages() path.  pmd_devmap() is used to distinguish dax-pmds from
pmd_huge() and pmd_trans_huge() pmds, which have slightly different semantics.

Explicitly mark the pmd_trans_huge() helpers that dax needs by adding
pmd_devmap() checks.
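As an illustrative sketch only (not part of this patch), the check pattern
that callers end up with looks roughly like the walker below; handle_huge()
and handle_ptes() are hypothetical placeholders for the pmd-level and
pte-level paths:

	/*
	 * A dax pmd has both _PAGE_PSE and _PAGE_DEVMAP set, so on x86
	 * pmd_trans_huge() now returns false for it while pmd_devmap()
	 * returns true; an anonymous thp pmd sets only _PAGE_PSE.
	 * Callers that must take the pmd-level path for either kind
	 * therefore test both predicates.
	 */
	static int walk_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr)
	{
		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
			return handle_huge(vma, pmd, addr);	/* hypothetical */
		if (pmd_none_or_clear_bad(pmd))
			return 0;
		return handle_ptes(vma, pmd, addr);		/* hypothetical */
	}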
Cc: Dave Hansen <dave@sr71.net>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Reported-by: Matthew Wilcox <willy@linux.intel.com>
Reported-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
Changes since v4:
1/ Fold in a __split_huge_pmd() regression fix (Kirill)
2/ Add a missing pmd_devmap() check to pmd_trans_huge_lock() (Willy)
arch/x86/include/asm/pgtable.h | 9 ++++++++-
include/linux/huge_mm.h | 5 +++--
include/linux/mm.h | 7 +++++++
mm/huge_memory.c | 38 +++++++++++++++++++++-----------------
mm/memory.c | 8 ++++----
mm/mprotect.c | 5 +++--
mm/pgtable-generic.c | 2 +-
7 files changed, 47 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index dc962ae41597..993ce3c84ff4 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -167,13 +167,20 @@ static inline int pmd_large(pmd_t pte)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
- return pmd_val(pmd) & _PAGE_PSE;
+ return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
static inline int has_transparent_hugepage(void)
{
return cpu_has_pse;
}
+
+#ifdef __HAVE_ARCH_PTE_DEVMAP
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return !!(pmd_val(pmd) & _PAGE_DEVMAP);
+}
+#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8ca35a131904..d39fa60bd6bf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -104,7 +104,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (pmd_trans_huge(*____pmd)) \
+ if (pmd_trans_huge(*____pmd) \
+ || pmd_devmap(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address); \
} while (0)
@@ -124,7 +125,7 @@ static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
spinlock_t **ptl)
{
VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
- if (pmd_trans_huge(*pmd))
+ if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma, ptl);
else
return false;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 957afd1b10a5..96f396bbcc9f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1471,6 +1471,13 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
+#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
unsigned long address)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7356857d7356..4521bec67364 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1024,7 +1024,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = -EAGAIN;
pmd = *src_pmd;
- if (unlikely(!pmd_trans_huge(pmd))) {
+ if (unlikely(!pmd_trans_huge(pmd) && !pmd_devmap(pmd))) {
pte_free(dst_mm, pgtable);
goto out_unlock;
}
@@ -1047,17 +1047,20 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_unlock;
}
- src_page = pmd_page(pmd);
- VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
- get_page(src_page);
- page_dup_rmap(src_page, true);
- add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ if (pmd_trans_huge(pmd)) {
+ /* thp accounting separate from pmd_devmap accounting */
+ src_page = pmd_page(pmd);
+ VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+ get_page(src_page);
+ page_dup_rmap(src_page, true);
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ atomic_long_inc(&dst_mm->nr_ptes);
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+ }
pmdp_set_wrprotect(src_mm, addr, src_pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
- pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
- atomic_long_inc(&dst_mm->nr_ptes);
ret = 0;
out_unlock:
@@ -1745,7 +1748,7 @@ bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
spinlock_t **ptl)
{
*ptl = pmd_lock(vma->vm_mm, pmd);
- if (likely(pmd_trans_huge(*pmd)))
+ if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
return true;
spin_unlock(*ptl);
return false;
@@ -2862,7 +2865,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
- VM_BUG_ON(!pmd_trans_huge(*pmd));
+ VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
count_vm_event(THP_SPLIT_PMD);
@@ -2975,14 +2978,15 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
ptl = pmd_lock(mm, pmd);
- if (unlikely(!pmd_trans_huge(*pmd)))
+ if (pmd_trans_huge(*pmd)) {
+ page = pmd_page(*pmd);
+ if (PageMlocked(page))
+ get_page(page);
+ else
+ page = NULL;
+ } else if (!pmd_devmap(*pmd))
goto out;
- page = pmd_page(*pmd);
__split_huge_pmd_locked(vma, pmd, haddr, false);
- if (PageMlocked(page))
- get_page(page);
- else
- page = NULL;
out:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
@@ -3012,7 +3016,7 @@ static void split_huge_pmd_address(struct vm_area_struct *vma,
return;
pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
+ if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
return;
/*
* Caller holds the mmap_sem write mode, so a huge pmd cannot
diff --git a/mm/memory.c b/mm/memory.c
index 9483d2b1dd3b..03b6fa406a28 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -950,7 +950,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
src_pmd = pmd_offset(src_pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (pmd_trans_huge(*src_pmd)) {
+ if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
int err;
VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
err = copy_huge_pmd(dst_mm, src_mm,
@@ -1177,7 +1177,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (pmd_trans_huge(*pmd)) {
+ if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE) {
#ifdef CONFIG_DEBUG_VM
if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
@@ -3375,7 +3375,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int ret;
barrier();
- if (pmd_trans_huge(orig_pmd)) {
+ if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
unsigned int dirty = flags & FAULT_FLAG_WRITE;
if (pmd_protnone(orig_pmd))
@@ -3404,7 +3404,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd)))
+ if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9c1445dc8a4c..732e07baf76c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -149,7 +149,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
unsigned long this_pages;
next = pmd_addr_end(addr, end);
- if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
+ if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
+ && pmd_none_or_clear_bad(pmd))
continue;
/* invoke the mmu notifier if the pmd is populated */
@@ -158,7 +159,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
mmu_notifier_invalidate_range_start(mm, mni_start, end);
}
- if (pmd_trans_huge(*pmd)) {
+ if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
if (next - addr != HPAGE_PMD_SIZE)
split_huge_pmd(vma, pmd, addr);
else {
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index c311a2ec6fea..9d4767698a1c 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -132,7 +132,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
{
pmd_t pmd;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(!pmd_trans_huge(*pmdp));
+ VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
--