From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Hugh Dickins <hughd@google.com>,
Andrea Arcangeli <aarcange@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>,
Vlastimil Babka <vbabka@suse.cz>,
Christoph Lameter <cl@gentwo.org>,
Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
Jerome Marchand <jmarchan@redhat.com>,
Yang Shi <yang.shi@linaro.org>,
Sasha Levin <sasha.levin@oracle.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv3 04/29] thp: rewrite freeze_page()/unfreeze_page() with generic rmap walkers
Date: Thu, 3 Mar 2016 19:51:54 +0300
Message-ID: <1457023939-98083-5-git-send-email-kirill.shutemov@linux.intel.com>
In-Reply-To: <1457023939-98083-1-git-send-email-kirill.shutemov@linux.intel.com>
The freeze_page() and unfreeze_page() helpers have evolved into rather
complex beasts. It would be nice to cut the complexity of this code.
This patch rewrites freeze_page() using the standard try_to_unmap(), and
unfreeze_page() using remove_migration_ptes().
The result is much simpler.
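For reference, the new helpers condense to the code below (lightly
reformatted from the hunks in the diff that follows; TTU_RMAP_LOCKED and
the locked variant of remove_migration_ptes() come from the earlier rmap
patches in this series):

static void freeze_page(struct page *page)
{
        enum ttu_flags ttu_flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
                TTU_IGNORE_ACCESS | TTU_RMAP_LOCKED;
        int i, ret;

        /* TTU_SPLIT_HUGE_PMD is only needed for the first subpage */
        ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
        for (i = 1; !ret && i < HPAGE_PMD_NR; i++)
                ret = try_to_unmap(page + i, ttu_flags);
        VM_BUG_ON(ret);
}

static void unfreeze_page(struct page *page)
{
        int i;

        /* one rmap walk per subpage, with the anon_vma lock already held */
        for (i = 0; i < HPAGE_PMD_NR; i++)
                remove_migration_ptes(page + i, page + i, true);
}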
But the new variant is somewhat slower. The current helpers iterate over
the VMAs the compound page is mapped into, and then over the ptes within
each VMA. The new helpers iterate over each small page, then over the
VMAs that small page is mapped into, and only then find the relevant pte.
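Schematically (identifiers taken from the removed and added hunks below;
an illustrative fragment, not meant to compile on its own):

/* Old: one anon_vma interval-tree walk; each VMA pass scans all
 * HPAGE_PMD_NR ptes of the compound page under a single pte lock. */
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
                pgoff + HPAGE_PMD_NR - 1)
        freeze_page_vma(avc->vma, page, address);

/* New: a full rmap walk per small page; each walk iterates the VMAs
 * mapping that page before locating its single relevant pte. */
for (i = 0; i < HPAGE_PMD_NR; i++)
        ret = try_to_unmap(page + i, ttu_flags);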
We also lose the optimization that allowed splitting a PMD directly into
migration entries.
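Concretely, that is the freeze path removed from __split_huge_pmd_locked()
below (quoted from the hunk); with it gone, the PMD is first split into
regular ptes and try_to_unmap() converts them to migration entries
afterwards:

        if (freeze) {
                swp_entry_t swp_entry;

                swp_entry = make_migration_entry(page + i, write);
                entry = swp_entry_to_pte(swp_entry);
        } else {
                /* regular pte, as kept in the new code */
                ...
        }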
I don't think the slowdown is critical, considering how much simpler the
result is and that split_huge_page() is quite rare nowadays: it only
happens due to memory pressure or migration.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
mm/huge_memory.c | 214 +++++++------------------------------------------------
1 file changed, 24 insertions(+), 190 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 354464b484a7..a9921a485400 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2904,7 +2904,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
}
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long haddr, bool freeze)
+ unsigned long haddr)
{
struct mm_struct *mm = vma->vm_mm;
struct page *page;
@@ -2946,18 +2946,12 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
* transferred to avoid any possibility of altering
* permissions across VMAs.
*/
- if (freeze) {
- swp_entry_t swp_entry;
- swp_entry = make_migration_entry(page + i, write);
- entry = swp_entry_to_pte(swp_entry);
- } else {
- entry = mk_pte(page + i, vma->vm_page_prot);
- entry = maybe_mkwrite(entry, vma);
- if (!write)
- entry = pte_wrprotect(entry);
- if (!young)
- entry = pte_mkold(entry);
- }
+ entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = maybe_mkwrite(entry, vma);
+ if (!write)
+ entry = pte_wrprotect(entry);
+ if (!young)
+ entry = pte_mkold(entry);
if (dirty)
SetPageDirty(page + i);
pte = pte_offset_map(&_pmd, haddr);
@@ -3010,13 +3004,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
*/
pmdp_invalidate(vma, haddr, pmd);
pmd_populate(mm, pmd, pgtable);
-
- if (freeze) {
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
- page_remove_rmap(page + i, false);
- put_page(page + i);
- }
- }
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
@@ -3037,7 +3024,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
page = NULL;
} else if (!pmd_devmap(*pmd))
goto out;
- __split_huge_pmd_locked(vma, pmd, haddr, false);
+ __split_huge_pmd_locked(vma, pmd, haddr);
out:
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
@@ -3114,180 +3101,27 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
}
}
-static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
- unsigned long address)
+static void freeze_page(struct page *page)
{
- unsigned long haddr = address & HPAGE_PMD_MASK;
- spinlock_t *ptl;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i, nr = HPAGE_PMD_NR;
-
- /* Skip pages which doesn't belong to the VMA */
- if (address < vma->vm_start) {
- int off = (vma->vm_start - address) >> PAGE_SHIFT;
- page += off;
- nr -= off;
- address = vma->vm_start;
- }
-
- pgd = pgd_offset(vma->vm_mm, address);
- if (!pgd_present(*pgd))
- return;
- pud = pud_offset(pgd, address);
- if (!pud_present(*pud))
- return;
- pmd = pmd_offset(pud, address);
- ptl = pmd_lock(vma->vm_mm, pmd);
- if (!pmd_present(*pmd)) {
- spin_unlock(ptl);
- return;
- }
- if (pmd_trans_huge(*pmd)) {
- if (page == pmd_page(*pmd))
- __split_huge_pmd_locked(vma, pmd, haddr, true);
- spin_unlock(ptl);
- return;
- }
- spin_unlock(ptl);
-
- pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
- for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) {
- pte_t entry, swp_pte;
- swp_entry_t swp_entry;
-
- /*
- * We've just crossed page table boundary: need to map next one.
- * It can happen if THP was mremaped to non PMD-aligned address.
- */
- if (unlikely(address == haddr + HPAGE_PMD_SIZE)) {
- pte_unmap_unlock(pte - 1, ptl);
- pmd = mm_find_pmd(vma->vm_mm, address);
- if (!pmd)
- return;
- pte = pte_offset_map_lock(vma->vm_mm, pmd,
- address, &ptl);
- }
-
- if (!pte_present(*pte))
- continue;
- if (page_to_pfn(page) != pte_pfn(*pte))
- continue;
- flush_cache_page(vma, address, page_to_pfn(page));
- entry = ptep_clear_flush(vma, address, pte);
- if (pte_dirty(entry))
- SetPageDirty(page);
- swp_entry = make_migration_entry(page, pte_write(entry));
- swp_pte = swp_entry_to_pte(swp_entry);
- if (pte_soft_dirty(entry))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- set_pte_at(vma->vm_mm, address, pte, swp_pte);
- page_remove_rmap(page, false);
- put_page(page);
- }
- pte_unmap_unlock(pte - 1, ptl);
-}
-
-static void freeze_page(struct anon_vma *anon_vma, struct page *page)
-{
- struct anon_vma_chain *avc;
- pgoff_t pgoff = page_to_pgoff(page);
+ enum ttu_flags ttu_flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
+ TTU_IGNORE_ACCESS | TTU_RMAP_LOCKED;
+ int i, ret;
VM_BUG_ON_PAGE(!PageHead(page), page);
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
- pgoff + HPAGE_PMD_NR - 1) {
- unsigned long address = __vma_address(page, avc->vma);
-
- mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
- address, address + HPAGE_PMD_SIZE);
- freeze_page_vma(avc->vma, page, address);
- mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
- address, address + HPAGE_PMD_SIZE);
- }
-}
-
-static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
- unsigned long address)
-{
- spinlock_t *ptl;
- pmd_t *pmd;
- pte_t *pte, entry;
- swp_entry_t swp_entry;
- unsigned long haddr = address & HPAGE_PMD_MASK;
- int i, nr = HPAGE_PMD_NR;
-
- /* Skip pages which doesn't belong to the VMA */
- if (address < vma->vm_start) {
- int off = (vma->vm_start - address) >> PAGE_SHIFT;
- page += off;
- nr -= off;
- address = vma->vm_start;
- }
-
- pmd = mm_find_pmd(vma->vm_mm, address);
- if (!pmd)
- return;
-
- pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
- for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) {
- /*
- * We've just crossed page table boundary: need to map next one.
- * It can happen if THP was mremaped to non-PMD aligned address.
- */
- if (unlikely(address == haddr + HPAGE_PMD_SIZE)) {
- pte_unmap_unlock(pte - 1, ptl);
- pmd = mm_find_pmd(vma->vm_mm, address);
- if (!pmd)
- return;
- pte = pte_offset_map_lock(vma->vm_mm, pmd,
- address, &ptl);
- }
-
- if (!is_swap_pte(*pte))
- continue;
-
- swp_entry = pte_to_swp_entry(*pte);
- if (!is_migration_entry(swp_entry))
- continue;
- if (migration_entry_to_page(swp_entry) != page)
- continue;
-
- get_page(page);
- page_add_anon_rmap(page, vma, address, false);
-
- entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
- if (PageDirty(page))
- entry = pte_mkdirty(entry);
- if (is_write_migration_entry(swp_entry))
- entry = maybe_mkwrite(entry, vma);
-
- flush_dcache_page(page);
- set_pte_at(vma->vm_mm, address, pte, entry);
-
- /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, pte);
- }
- pte_unmap_unlock(pte - 1, ptl);
+ /* We only need TTU_SPLIT_HUGE_PMD once */
+ ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);
+ for (i = 1; !ret && i < HPAGE_PMD_NR; i++)
+ ret = try_to_unmap(page + i, ttu_flags);
+ VM_BUG_ON(ret);
}
-static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
+static void unfreeze_page(struct page *page)
{
- struct anon_vma_chain *avc;
- pgoff_t pgoff = page_to_pgoff(page);
-
- anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
- pgoff, pgoff + HPAGE_PMD_NR - 1) {
- unsigned long address = __vma_address(page, avc->vma);
+ int i;
- mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
- address, address + HPAGE_PMD_SIZE);
- unfreeze_page_vma(avc->vma, page, address);
- mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
- address, address + HPAGE_PMD_SIZE);
- }
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+ remove_migration_ptes(page + i, page + i, true);
}
static void __split_huge_page_tail(struct page *head, int tail,
@@ -3365,7 +3199,7 @@ static void __split_huge_page(struct page *page, struct list_head *list)
ClearPageCompound(head);
spin_unlock_irq(&zone->lru_lock);
- unfreeze_page(page_anon_vma(head), head);
+ unfreeze_page(head);
for (i = 0; i < HPAGE_PMD_NR; i++) {
struct page *subpage = head + i;
@@ -3461,7 +3295,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
mlocked = PageMlocked(page);
- freeze_page(anon_vma, head);
+ freeze_page(head);
VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -3490,7 +3324,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
BUG();
} else {
spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
- unfreeze_page(anon_vma, head);
+ unfreeze_page(head);
ret = -EBUSY;
}
--
2.7.0