From: David Gibson <dwg@au1.ibm.com>
To: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: paulus@samba.org, linuxppc-dev@lists.ozlabs.org, linux-mm@kvack.org
Subject: Re: [PATCH -V5 06/25] powerpc: Reduce PTE table memory wastage
Date: Wed, 10 Apr 2013 14:46:11 +1000
Message-ID: <20130410044611.GF8165@truffula.fritz.box>
In-Reply-To: <1365055083-31956-7-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
On Thu, Apr 04, 2013 at 11:27:44AM +0530, Aneesh Kumar K.V wrote:
> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
>
> We allocate one page for the last level of the linux page table. With THP and
> a large page size of 16MB, that would mean we are wasting a large part
> of that page. To map a 16MB area, we only need a PTE space of 2K with 64K
> page size. This patch reduces the space wastage by sharing the page
> allocated for the last level of the linux page table among multiple pmd
> entries. We call these smaller chunks PTE page fragments, and the
> allocated page a PTE page.
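
(That is, 16M / 64K = 256 PTEs, and 256 * sizeof(pte_t) = 256 * 8 bytes =
2K, so only 2K of each 64K PTE page is actually used per PMD entry.)
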
>
> In order to support systems which don't have 64K HPTE support, we also
> add another 2K to the PTE page fragment. The second half of the fragment
> is used for storing the slot and secondary bit information of an HPTE. With
> this we now have a 4K PTE fragment.
>
> We use a simple approach to share the PTE page. On allocation, we bump the
> PTE page refcount to 16 and share the PTE page with the next 16 pte alloc
> requests. This should help the node locality of the PTE page fragments,
> assuming that the immediately following pte alloc requests will mostly come
> from the same NUMA node. We don't try to reuse freed PTE page fragments, so
> we could be wasting some space.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
> arch/powerpc/include/asm/mmu-book3e.h | 4 +
> arch/powerpc/include/asm/mmu-hash64.h | 4 +
> arch/powerpc/include/asm/page.h | 4 +
> arch/powerpc/include/asm/pgalloc-64.h | 72 ++++-------------
> arch/powerpc/kernel/setup_64.c | 4 +-
> arch/powerpc/mm/mmu_context_hash64.c | 35 +++++++++
> arch/powerpc/mm/pgtable_64.c | 137 +++++++++++++++++++++++++++++++++
> 7 files changed, 202 insertions(+), 58 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
> index 99d43e0..affbd68 100644
> --- a/arch/powerpc/include/asm/mmu-book3e.h
> +++ b/arch/powerpc/include/asm/mmu-book3e.h
> @@ -231,6 +231,10 @@ typedef struct {
> u64 high_slices_psize; /* 4 bits per slice for now */
> u16 user_psize; /* page size index */
> #endif
> +#ifdef CONFIG_PPC_64K_PAGES
> + /* for 4K PTE fragment support */
> + struct page *pgtable_page;
> +#endif
> } mm_context_t;
>
> /* Page size definitions, common between 32 and 64-bit
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
> index 35bb51e..300ac3c 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -498,6 +498,10 @@ typedef struct {
> unsigned long acop; /* mask of enabled coprocessor types */
> unsigned int cop_pid; /* pid value used with coprocessors */
> #endif /* CONFIG_PPC_ICSWX */
> +#ifdef CONFIG_PPC_64K_PAGES
> + /* for 4K PTE fragment support */
> + struct page *pgtable_page;
> +#endif
> } mm_context_t;
>
>
> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
> index f072e97..38e7ff6 100644
> --- a/arch/powerpc/include/asm/page.h
> +++ b/arch/powerpc/include/asm/page.h
> @@ -378,7 +378,11 @@ void arch_free_page(struct page *page, int order);
>
> struct vm_area_struct;
>
> +#ifdef CONFIG_PPC_64K_PAGES
> +typedef pte_t *pgtable_t;
> +#else
> typedef struct page *pgtable_t;
> +#endif
Ugh, that's pretty horrible, though I don't see an easy way around it.
> #include <asm-generic/memory_model.h>
> #endif /* __ASSEMBLY__ */
> diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
> index cdbf555..3418989 100644
> --- a/arch/powerpc/include/asm/pgalloc-64.h
> +++ b/arch/powerpc/include/asm/pgalloc-64.h
> @@ -150,6 +150,13 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
>
> #else /* if CONFIG_PPC_64K_PAGES */
>
> +extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
> +extern void page_table_free(struct mm_struct *, unsigned long *, int);
> +extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
> +#ifdef CONFIG_SMP
> +extern void __tlb_remove_table(void *_table);
> +#endif
> +
> #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
>
> static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
> @@ -161,90 +168,42 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
> static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
> pgtable_t pte_page)
> {
> - pmd_populate_kernel(mm, pmd, page_address(pte_page));
> + pmd_set(pmd, (unsigned long)pte_page);
> }
>
> static inline pgtable_t pmd_pgtable(pmd_t pmd)
> {
> - return pmd_page(pmd);
> + return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE);
> }
>
> static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
> unsigned long address)
> {
> - return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
> + return (pte_t *)page_table_alloc(mm, address, 1);
> }
>
> static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
> - unsigned long address)
> + unsigned long address)
> {
> - struct page *page;
> - pte_t *pte;
> -
> - pte = pte_alloc_one_kernel(mm, address);
> - if (!pte)
> - return NULL;
> - page = virt_to_page(pte);
> - pgtable_page_ctor(page);
> - return page;
> + return (pgtable_t)page_table_alloc(mm, address, 0);
> }
>
> static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
> {
> - free_page((unsigned long)pte);
> + page_table_free(mm, (unsigned long *)pte, 1);
> }
>
> static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
> {
> - pgtable_page_dtor(ptepage);
> - __free_page(ptepage);
> -}
> -
> -static inline void pgtable_free(void *table, unsigned index_size)
> -{
> - if (!index_size)
> - free_page((unsigned long)table);
> - else {
> - BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
> - kmem_cache_free(PGT_CACHE(index_size), table);
> - }
> + page_table_free(mm, (unsigned long *)ptepage, 0);
> }
>
> -#ifdef CONFIG_SMP
> -static inline void pgtable_free_tlb(struct mmu_gather *tlb,
> - void *table, int shift)
> -{
> - unsigned long pgf = (unsigned long)table;
> - BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
> - pgf |= shift;
> - tlb_remove_table(tlb, (void *)pgf);
> -}
> -
> -static inline void __tlb_remove_table(void *_table)
> -{
> - void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
> - unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
> -
> - pgtable_free(table, shift);
> -}
> -#else /* !CONFIG_SMP */
> -static inline void pgtable_free_tlb(struct mmu_gather *tlb,
> - void *table, int shift)
> -{
> - pgtable_free(table, shift);
> -}
> -#endif /* CONFIG_SMP */
> -
> static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
> unsigned long address)
> {
> - struct page *page = page_address(table);
> -
> tlb_flush_pgtable(tlb, address);
> - pgtable_page_dtor(page);
> - pgtable_free_tlb(tlb, page, 0);
> + pgtable_free_tlb(tlb, table, 0);
> }
> -
> #endif /* CONFIG_PPC_64K_PAGES */
>
> static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
> @@ -258,7 +217,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
> kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
> }
>
> -
> #define __pmd_free_tlb(tlb, pmd, addr) \
> pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
> #ifndef CONFIG_PPC_64K_PAGES
> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
> index 6da881b..04d833c 100644
> --- a/arch/powerpc/kernel/setup_64.c
> +++ b/arch/powerpc/kernel/setup_64.c
> @@ -575,7 +575,9 @@ void __init setup_arch(char **cmdline_p)
> init_mm.end_code = (unsigned long) _etext;
> init_mm.end_data = (unsigned long) _edata;
> init_mm.brk = klimit;
> -
> +#ifdef CONFIG_PPC_64K_PAGES
> + init_mm.context.pgtable_page = NULL;
> +#endif
> irqstack_early_init();
> exc_lvl_early_init();
> emergency_stack_init();
> diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
> index 59cd773..fbfdca2 100644
> --- a/arch/powerpc/mm/mmu_context_hash64.c
> +++ b/arch/powerpc/mm/mmu_context_hash64.c
> @@ -86,6 +86,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
> spin_lock_init(mm->context.cop_lockp);
> #endif /* CONFIG_PPC_ICSWX */
>
> +#ifdef CONFIG_PPC_64K_PAGES
> + mm->context.pgtable_page = NULL;
> +#endif
> return 0;
> }
>
> @@ -97,13 +100,45 @@ void __destroy_context(int context_id)
> }
> EXPORT_SYMBOL_GPL(__destroy_context);
>
> +#ifdef CONFIG_PPC_64K_PAGES
> +static void destroy_pagetable_page(struct mm_struct *mm)
> +{
> + int count;
> + struct page *page;
> +
> + page = mm->context.pgtable_page;
> + if (!page)
> + return;
> +
> + /* drop all the pending references */
> + count = atomic_read(&page->_mapcount) + 1;
> + /* We allow PTE_FRAG_NR(16) fragments from a PTE page */
> + count = atomic_sub_return(16 - count, &page->_count);
You should really move PTE_FRAG_NR to a header so you can actually use
it here rather than hard coding 16.
It took me a fair while to convince myself that there is no race here
with something altering mapcount and count between the atomic_read()
and the atomic_sub_return(). It could do with a comment to explain
why that is safe.
Re-using the mapcount field for your index also seems odd, and it took
me a while to convince myself that that's safe too. Wouldn't it be
simpler to store a pointer to the next sub-page in the mm_context
instead? You can get from that to the struct page easily enough with a
shift and pfn_to_page().
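Roughly what I have in mind (completely untested, and "pte_frag" is just
an illustrative name for the new field):

	/* in mm_context_t, replacing the struct page pointer: */
	void *pte_frag;		/* next unused 4K fragment, or NULL */

static pte_t *get_from_cache(struct mm_struct *mm)
{
	pte_t *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		void *next = (void *)ret + PTE_FRAG_SIZE;
		/* ran off the end of the page, nothing left to cache */
		if (!((unsigned long)next & ~PAGE_MASK))
			next = NULL;
		mm->context.pte_frag = next;
	}
	spin_unlock(&mm->page_table_lock);
	return ret;
}

You only need the struct page when you actually touch the refcount, and
virt_to_page() (or the shift and pfn_to_page()) gets you there from the
fragment address.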
> + if (!count) {
> + pgtable_page_dtor(page);
> + reset_page_mapcount(page);
> + free_hot_cold_page(page, 0);
It would be nice to use put_page() somehow instead of duplicating its
logic, though I realise the sparc code you've based this on does the
same thing.
> + }
> +}
> +
> +#else
> +static inline void destroy_pagetable_page(struct mm_struct *mm)
> +{
> + return;
> +}
> +#endif
> +
> +
> void destroy_context(struct mm_struct *mm)
> {
> +
> #ifdef CONFIG_PPC_ICSWX
> drop_cop(mm->context.acop, mm);
> kfree(mm->context.cop_lockp);
> mm->context.cop_lockp = NULL;
> #endif /* CONFIG_PPC_ICSWX */
> +
> + destroy_pagetable_page(mm);
> __destroy_context(mm->context.id);
> subpage_prot_free(mm);
> mm->context.id = MMU_NO_CONTEXT;
> diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
> index e212a27..e79840b 100644
> --- a/arch/powerpc/mm/pgtable_64.c
> +++ b/arch/powerpc/mm/pgtable_64.c
> @@ -337,3 +337,140 @@ EXPORT_SYMBOL(__ioremap_at);
> EXPORT_SYMBOL(iounmap);
> EXPORT_SYMBOL(__iounmap);
> EXPORT_SYMBOL(__iounmap_at);
> +
> +#ifdef CONFIG_PPC_64K_PAGES
> +/*
> + * we support 16 fragments per PTE page. This is limited by how many
> + * bits we can pack in page->_mapcount. We use the first half for
> + * tracking the usage for rcu page table free.
> + */
> +#define PTE_FRAG_NR 16
> +/*
> + * We use a 2K PTE page fragment and another 2K for storing
> + * real_pte_t hash index
> + */
> +#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
> +
> +static pte_t *get_from_cache(struct mm_struct *mm)
> +{
> + int index;
> + pte_t *ret = NULL;
> + struct page *page;
> +
> + spin_lock(&mm->page_table_lock);
> + page = mm->context.pgtable_page;
> + if (page) {
> + void *p = page_address(page);
> + index = atomic_add_return(1, &page->_mapcount);
> + ret = (pte_t *) (p + (index * PTE_FRAG_SIZE));
> + /*
> + * If we have taken up all the fragments mark PTE page NULL
> + */
> + if (index == PTE_FRAG_NR - 1)
> + mm->context.pgtable_page = NULL;
> + }
> + spin_unlock(&mm->page_table_lock);
> + return ret;
> +}
> +
> +static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
> +{
> + pte_t *ret = NULL;
> + struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
> + __GFP_REPEAT | __GFP_ZERO);
> + if (!page)
> + return NULL;
> +
> + spin_lock(&mm->page_table_lock);
> + /*
> + * If we find pgtable_page set, we return
> + * the allocated page with single fragement
> + * count.
> + */
> + if (likely(!mm->context.pgtable_page)) {
> + atomic_set(&page->_count, PTE_FRAG_NR);
> + atomic_set(&page->_mapcount, 0);
> + mm->context.pgtable_page = page;
> + }
.. and in the unlikely case where there *is* a pgtable_page already
set, what then? Seems like you should BUG_ON, or at least return NULL
- as it is you will return the first sub-page of that page again,
which is very likely in use.
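e.g. (untested, just to illustrate one way of handling it without
failing the allocation):

	spin_lock(&mm->page_table_lock);
	if (unlikely(mm->context.pgtable_page)) {
		/*
		 * Another thread of this mm installed a PTE page while
		 * we were in alloc_page().  Don't clobber it - give our
		 * page back and hand out a fragment of the cached one.
		 */
		spin_unlock(&mm->page_table_lock);
		__free_page(page);
		return get_from_cache(mm);
	}
	atomic_set(&page->_count, PTE_FRAG_NR);
	atomic_set(&page->_mapcount, 0);
	mm->context.pgtable_page = page;
	spin_unlock(&mm->page_table_lock);

(A BUG_ON() would do too, of course; the point is just not to fall
through silently.)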
> + spin_unlock(&mm->page_table_lock);
> +
> + ret = (unsigned long *)page_address(page);
> + if (!kernel)
> + pgtable_page_ctor(page);
> +
> + return ret;
> +}
> +
> +pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
> +{
> + pte_t *pte;
> +
> + pte = get_from_cache(mm);
> + if (pte)
> + return pte;
> +
> + return __alloc_for_cache(mm, kernel);
> +}
> +
> +void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
> +{
> + struct page *page = virt_to_page(table);
> + if (put_page_testzero(page)) {
> + if (!kernel)
> + pgtable_page_dtor(page);
> + reset_page_mapcount(page);
> + free_hot_cold_page(page, 0);
> + }
> +}
> +
> +#ifdef CONFIG_SMP
> +static void page_table_free_rcu(void *table)
> +{
> + struct page *page = virt_to_page(table);
> + if (put_page_testzero(page)) {
> + pgtable_page_dtor(page);
> + reset_page_mapcount(page);
> + free_hot_cold_page(page, 0);
> + }
> +}
> +
> +void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
> +{
> + unsigned long pgf = (unsigned long)table;
> +
> + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
> + pgf |= shift;
> + tlb_remove_table(tlb, (void *)pgf);
> +}
> +
> +void __tlb_remove_table(void *_table)
> +{
> + void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
> + unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
> +
> + if (!shift)
> + /* PTE page needs special handling */
> + page_table_free_rcu(table);
> + else {
> + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
> + kmem_cache_free(PGT_CACHE(shift), table);
> + }
> +}
> +#else
> +void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
> +{
> + if (!shift) {
> + /* PTE page needs special handling */
> + struct page *page = virt_to_page(table);
> + if (put_page_testzero(page)) {
> + pgtable_page_dtor(page);
> + reset_page_mapcount(page);
> + free_hot_cold_page(page, 0);
> + }
> + } else {
> + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
> + kmem_cache_free(PGT_CACHE(shift), table);
> + }
> +}
> +#endif
> +#endif /* CONFIG_PPC_64K_PAGES */
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson