From: Andrea Arcangeli <aarcange@redhat.com>
To: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
	Izik Eidus <ieidus@redhat.com>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
	Mel Gorman <mel@csn.ul.ie>, Dave Hansen <dave@linux.vnet.ibm.com>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Christoph Lameter <cl@linux-foundation.org>,
	Chris Wright <chrisw@sous-sol.org>,
	bpicco@redhat.com,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
	Chris Mason <chris.mason@oracle.com>
Subject: [PATCH 42 of 67] add x86 32bit support
Date: Thu, 08 Apr 2010 03:51:25 +0200
Message-ID: <5b854458f12cbbcc336c.1270691485@v2.random>
In-Reply-To: <patchbomb.1270691443@v2.random>

From: Johannes Weiner <hannes@cmpxchg.org>

Add support for transparent hugepages to x86 32bit.

VM_HUGEPAGE now shares its VM_ bitflag with VM_MAPPED_COPY: the old value
(0x100000000UL) does not fit in a 32-bit vm_flags, and mm/nommu.c will never
support transparent hugepages, so the two users can never coexist in one build.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
---
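Note on the PAE case: on 3-level paging a pmd entry is 64 bits wide, which a
32-bit CPU cannot clear with a single atomic store. The native_pmdp_get_and_clear()
added below therefore xchg()s the low word first, so the present bit is gone
before the high word is read and cleared non-atomically. A minimal stand-alone
sketch of that ordering follows; it is not kernel code, the demo_pmd and
demo_get_and_clear names are illustrative only, and the GCC/Clang __atomic
builtin stands in for the kernel's xchg():

/*
 * Stand-alone illustration of the clearing order used by the PAE
 * native_pmdp_get_and_clear() below: atomically exchange the low 32 bits
 * first -- the present bit lives there, so the hardware can no longer walk
 * through the entry -- then copy and clear the high 32 bits without
 * atomicity.  The layout mirrors union split_pmd on little-endian x86.
 */
#include <stdint.h>
#include <stdio.h>

union demo_pmd {
	struct {
		uint32_t low;	/* present bit + low pfn bits */
		uint32_t high;	/* high pfn bits */
	};
	uint64_t val;
};

static uint64_t demo_get_and_clear(union demo_pmd *p)
{
	union demo_pmd res;

	/* atomic exchange of the low word acts as the barrier */
	res.low = __atomic_exchange_n(&p->low, 0, __ATOMIC_SEQ_CST);
	res.high = p->high;
	p->high = 0;
	return res.val;
}

int main(void)
{
	union demo_pmd pmd = { .val = 0x0000123400000067ULL };

	printf("old pmd: %#llx\n",
	       (unsigned long long)demo_get_and_clear(&pmd));
	printf("new pmd: %#llx\n", (unsigned long long)pmd.val);
	return 0;
}

On 2-level paging the pmd is a single 32-bit word, so a single xchg() is
enough, exactly as for a pte.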

diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -46,6 +46,15 @@ static inline pte_t native_ptep_get_and_
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+	return __pmd(xchg((pmdval_t *)xp, 0));
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -104,6 +104,29 @@ static inline pte_t native_ptep_get_and_
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+union split_pmd {
+	struct {
+		u32 pmd_low;
+		u32 pmd_high;
+	};
+	pmd_t pmd;
+};
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	union split_pmd res, *orig = (union split_pmd *)pmdp;
+
+	/* xchg acts as a barrier before setting of the high bits */
+	res.pmd_low = xchg(&orig->pmd_low, 0);
+	res.pmd_high = orig->pmd_high;
+	orig->pmd_high = 0;
+
+	return res.pmd;
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -95,6 +95,11 @@ static inline int pte_young(pte_t pte)
 	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_RW;
@@ -143,6 +148,18 @@ static inline int pmd_large(pmd_t pte)
 		(_PAGE_PSE | _PAGE_PRESENT);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_SPLITTING;
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_PSE;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 {
 	pteval_t v = native_pte_val(pte);
@@ -217,6 +234,55 @@ static inline pte_t pte_mkspecial(pte_t 
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -525,6 +591,14 @@ static inline pte_t native_local_ptep_ge
 	return res;
 }
 
+static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	pmd_t res = *pmdp;
+
+	native_pmd_clear(pmdp);
+	return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep , pte_t pte)
 {
@@ -612,6 +686,49 @@ static inline void ptep_set_wrprotect(st
 	pte_update(mm, addr, ptep);
 }
 
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+	pmd_update(mm, addr, pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp)
+{
+	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+	pmd_update(mm, addr, pmdp);
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -182,115 +182,6 @@ extern void cleanup_highmap(void);
 
 #define __HAVE_ARCH_PTE_SAME
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-	return pmd_val(pmd) & _PAGE_SPLITTING;
-}
-
-static inline int pmd_trans_huge(pmd_t pmd)
-{
-	return pmd_val(pmd) & _PAGE_PSE;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
-
-#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
-				 unsigned long address, pmd_t *pmdp,
-				 pmd_t entry, int dirty);
-
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-				     unsigned long addr, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
-				  unsigned long address, pmd_t *pmdp);
-
-
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern void pmdp_splitting_flush(struct vm_area_struct *vma,
-				 unsigned long addr, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMD_WRITE
-static inline int pmd_write(pmd_t pmd)
-{
-	return pmd_flags(pmd) & _PAGE_RW;
-}
-
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmdp)
-{
-	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
-	pmd_update(mm, addr, pmdp);
-	return pmd;
-}
-
-#define __HAVE_ARCH_PMDP_SET_WRPROTECT
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
-				      unsigned long addr, pmd_t *pmdp)
-{
-	clear_bit(_PAGE_BIT_RW, (unsigned long *)&pmdp->pmd);
-	pmd_update(mm, addr, pmdp);
-}
-
-static inline int pmd_young(pmd_t pmd)
-{
-	return pmd_flags(pmd) & _PAGE_ACCESSED;
-}
-
-static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
-{
-	pmdval_t v = native_pmd_val(pmd);
-
-	return native_make_pmd(v | set);
-}
-
-static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
-{
-	pmdval_t v = native_pmd_val(pmd);
-
-	return native_make_pmd(v & ~clear);
-}
-
-static inline pmd_t pmd_mkold(pmd_t pmd)
-{
-	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
-}
-
-static inline pmd_t pmd_wrprotect(pmd_t pmd)
-{
-	return pmd_clear_flags(pmd, _PAGE_RW);
-}
-
-static inline pmd_t pmd_mkdirty(pmd_t pmd)
-{
-	return pmd_set_flags(pmd, _PAGE_DIRTY);
-}
-
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
-{
-	return pmd_set_flags(pmd, _PAGE_PSE);
-}
-
-static inline pmd_t pmd_mkyoung(pmd_t pmd)
-{
-	return pmd_set_flags(pmd, _PAGE_ACCESSED);
-}
-
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
-{
-	return pmd_set_flags(pmd, _PAGE_RW);
-}
-
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-	return pmd_clear_flags(pmd, _PAGE_PRESENT);
-}
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -352,7 +352,7 @@ int pmdp_test_and_clear_young(struct vm_
 
 	if (pmd_young(*pmdp))
 		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
-					 (unsigned long *) &pmdp->pmd);
+					 (unsigned long *)pmdp);
 
 	if (ret)
 		pmd_update(vma->vm_mm, addr, pmdp);
@@ -394,7 +394,7 @@ void pmdp_splitting_flush(struct vm_area
 	int set;
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
-				(unsigned long *)&pmdp->pmd);
+				(unsigned long *)pmdp);
 	if (set) {
 		pmd_update(vma->vm_mm, address, pmdp);
 		/* need tlb flush only to serialize against gup-fast */
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,7 +98,11 @@ extern unsigned int kobjsize(const void 
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#else
+#define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
+#endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
@@ -107,9 +111,6 @@ extern unsigned int kobjsize(const void 
 #define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
 #define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
 #define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
-#if BITS_PER_LONG > 32
-#define VM_HUGEPAGE	0x100000000UL	/* MADV_HUGEPAGE marked this vma */
-#endif
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/Kconfig b/mm/Kconfig
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -290,7 +290,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
 
 config TRANSPARENT_HUGEPAGE
 	bool "Transparent Hugepage support" if EMBEDDED
-	depends on X86_64 && MMU
+	depends on X86 && MMU
 	default y
 	help
 	  Transparent Hugepages allows the kernel to use huge pages and

