From: aarcange@redhat.com
To: linux-mm@kvack.org, akpm@linux-foundation.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
Izik Eidus <ieidus@redhat.com>,
Hugh Dickins <hugh.dickins@tiscali.co.uk>,
Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
Mel Gorman <mel@csn.ul.ie>, Dave Hansen <dave@linux.vnet.ibm.com>,
Benjamin Herrenschmidt <benh@kernel.crashing.org>,
Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Christoph Lameter <cl@linux-foundation.org>,
Chris Wright <chrisw@sous-sol.org>,
bpicco@redhat.com,
KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
Arnd Bergmann <arnd@arndb.de>,
"Michael S. Tsirkin" <mst@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Andrea Arcangeli <aarcange@redhat.com>
Subject: [patch 15/35] add pmd mangling functions to x86
Date: Tue, 09 Mar 2010 20:39:16 +0100
Message-ID: <20100309194313.940190685@redhat.com>
In-Reply-To: <20100309193901.207868642@redhat.com>
[-- Attachment #1: pmd_mangling_x86 --]
[-- Type: text/plain, Size: 7108 bytes --]
From: Andrea Arcangeli <aarcange@redhat.com>
Add the needed pmd mangling functions, symmetric with their pte counterparts.
pmdp_splitting_flush is the only one without a pte counterpart: it is needed
to serialize the VM against split_huge_page. It atomically sets the splitting
bit, much as pmdp_clear_flush_young atomically clears the accessed bit, and
both have to flush the TLB to take effect; for pmdp_splitting_flush the flush
must happen synchronously.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
---
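As an illustration only (not part of this patch; the helper name and body are
hypothetical, only pmdp_splitting_flush(), VM_BUG_ON() and HPAGE_PMD_MASK come
from this series), split_huge_page is expected to use the new primitive roughly
like this to serialize against gup_fast:

	/*
	 * Hypothetical sketch: freeze a huge pmd before splitting it.
	 * gup_fast walks the pagetables with local irqs disabled and
	 * without taking locks, so it cannot service the TLB flush IPI
	 * while it runs: once pmdp_splitting_flush() returns, every
	 * concurrent gup_fast has either completed or will see the
	 * splitting bit and bail out.
	 */
	static void freeze_huge_pmd(struct vm_area_struct *vma,
				    unsigned long haddr, pmd_t *pmdp)
	{
		VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);

		/* Atomically set _PAGE_BIT_SPLITTING and flush the TLB. */
		pmdp_splitting_flush(vma, haddr, pmdp);

		/* split_huge_page() can now rewrite the mapping safely. */
	}
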
arch/x86/include/asm/pgtable.h | 8 +-
arch/x86/include/asm/pgtable_64.h | 105 ++++++++++++++++++++++++++++++++++++++
arch/x86/mm/pgtable.c | 66 +++++++++++++++++++++++
3 files changed, 175 insertions(+), 4 deletions(-)
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -300,15 +300,15 @@ pmd_t *populate_extra_pmd(unsigned long
pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
+
#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif
-#ifndef __ASSEMBLY__
-#include <linux/mm_types.h>
-
static inline int pte_none(pte_t pte)
{
return !pte.pte;
@@ -351,7 +351,7 @@ static inline unsigned long pmd_page_vad
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -72,6 +72,19 @@ static inline pte_t native_ptep_get_and_
#endif
}
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+#ifdef CONFIG_SMP
+ return native_make_pmd(xchg(&xp->pmd, 0));
+#else
+ /* native_local_pmdp_get_and_clear,
+ but duplicated because of cyclic dependency */
+ pmd_t ret = *xp;
+ native_pmd_clear(NULL, 0, xp);
+ return ret;
+#endif
+}
+
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
@@ -181,6 +194,98 @@ static inline int pmd_trans_huge(pmd_t p
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+ pmd_update(mm, addr, pmdp);
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ clear_bit(_PAGE_BIT_RW, (unsigned long *)&pmdp->pmd);
+ pmd_update(mm, addr, pmdp);
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_RW);
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -309,6 +309,25 @@ int ptep_set_access_flags(struct vm_area
return changed;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ int changed = !pmd_same(*pmdp, entry);
+
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+ if (changed && dirty) {
+ *pmdp = entry;
+ pmd_update_defer(vma->vm_mm, address, pmdp);
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+
+ return changed;
+}
+#endif
+
int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
@@ -324,6 +343,23 @@ int ptep_test_and_clear_young(struct vm_
return ret;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ int ret = 0;
+
+ if (pmd_young(*pmdp))
+ ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+ (unsigned long *) &pmdp->pmd);
+
+ if (ret)
+ pmd_update(vma->vm_mm, addr, pmdp);
+
+ return ret;
+}
+#endif
+
int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
@@ -336,6 +372,36 @@ int ptep_clear_flush_young(struct vm_are
return young;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ int young;
+
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+ young = pmdp_test_and_clear_young(vma, address, pmdp);
+ if (young)
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+ return young;
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ int set;
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
+ (unsigned long *)&pmdp->pmd);
+ if (set) {
+ pmd_update(vma->vm_mm, address, pmdp);
+ /* need tlb flush only to serialize against gup-fast */
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+}
+#endif
+
/**
* reserve_top_address - reserves a hole in the top of kernel address space
* @reserve - size of hole to reserve
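
For completeness, a minimal sketch (again hypothetical, not part of the patch)
of how a huge-pmd write-fault path might combine the new helpers, mirroring the
existing pte-level use of ptep_set_access_flags():

	/*
	 * Hypothetical sketch: mark a huge pmd dirty, writable and young
	 * after a write fault.  All helpers used here are introduced by
	 * this patch.
	 */
	static void huge_pmd_set_written(struct vm_area_struct *vma,
					 unsigned long haddr, pmd_t *pmdp,
					 pmd_t orig_pmd)
	{
		pmd_t entry;

		/* Build the new pmd value from the old one. */
		entry = pmd_mkyoung(pmd_mkdirty(pmd_mkwrite(orig_pmd)));

		/*
		 * pmdp_set_access_flags() writes the entry and flushes the
		 * TLB only when the pmd actually changed and the access was
		 * a write (dirty != 0).
		 */
		pmdp_set_access_flags(vma, haddr, pmdp, entry, 1);
	}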