From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
To: benh@kernel.crashing.org, paulus@samba.org, mpe@ellerman.id.au
Cc: linuxppc-dev@lists.ozlabs.org,
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Subject: [PATCH 45/65] powerpc/mm: Make a copy of pgalloc.h for 32 and 64 book3s
Date: Sun, 27 Mar 2016 13:53:53 +0530
Message-ID: <1459067053-10835-45-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
In-Reply-To: <1459067053-10835-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
This patch starts to carve out a book3s variant of the pgalloc headers. We
have multiple book3s-specific changes, such as:
* a 4-level page table
* storing physical addresses in the higher-level tables
* using pte_t * for pgtable_t
Having a book3s 64 specific variant helps keep the code simpler and removes
a lot of #ifdefs from the code.
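As an illustration of where this is heading, here is a minimal sketch of
what the common asm/pgalloc.h selector could look like once the split is
complete. The nohash variant is an assumption at this point in the series
(later patches introduce the copies), so treat this as a sketch rather
than part of the patch:

/*
 * Illustrative only: pick the MMU-specific pgalloc header with a
 * single #ifdef instead of scattering them through shared code.
 * asm/nohash/pgalloc.h is hypothetical here, not added by this patch.
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
#include <asm/nohash/pgalloc.h>
#endif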
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/book3s/32/pgalloc.h | 109 +++++++++++
arch/powerpc/include/asm/book3s/64/pgalloc.h | 266 +++++++++++++++++++++++++++
2 files changed, 375 insertions(+)
create mode 100644 arch/powerpc/include/asm/book3s/32/pgalloc.h
create mode 100644 arch/powerpc/include/asm/book3s/64/pgalloc.h
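One detail worth calling out in the headers below: pgtable_free_tlb()
stuffs the page-table cache index into the low bits of the table pointer
before handing it to tlb_remove_table(), and __tlb_remove_table() unpacks
it again. This works because MAX_PGTABLE_INDEX_SIZE (0xf on 64-bit) is a
mask that fits within the minimum alignment of every page table. A
stand-alone sketch of that encoding, with illustrative names that are not
part of the patch:

/*
 * Minimal sketch of the pointer/index packing used by
 * pgtable_free_tlb()/__tlb_remove_table() in the diff below.
 * Assumes 'table' is aligned to at least 16 bytes, so its low
 * four bits (MAX_PGTABLE_INDEX_SIZE == 0xf) are free to reuse.
 */
static inline void *pgtable_pack(void *table, unsigned int index_size)
{
	return (void *)((unsigned long)table | index_size);
}

static inline void *pgtable_unpack(void *packed, unsigned int *index_size)
{
	*index_size = (unsigned long)packed & 0xfUL;
	return (void *)((unsigned long)packed & ~0xfUL);
}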
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
new file mode 100644
index 000000000000..76d6b9e0c8a9
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -0,0 +1,109 @@
+#ifndef _ASM_POWERPC_PGALLOC_32_H
+#define _ASM_POWERPC_PGALLOC_32_H
+
+#include <linux/threads.h>
+
+/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
+#define MAX_PGTABLE_INDEX_SIZE 0
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+/*
+ * We don't have any real pmds, and this code never triggers because
+ * the pgd will always be present.
+ */
+/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
+/* #define pgd_populate(mm, pmd, pte) BUG() */
+
+#ifndef CONFIG_BOOKE
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+ pte_t *pte)
+{
+ *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pte_page)
+{
+ *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#else
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+ pte_t *pte)
+{
+ *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pte_page)
+{
+ *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+ BUG_ON(index_size); /* 32-bit doesn't use this */
+ free_page((unsigned long)table);
+}
+
+#define check_pgt_cache() do { } while (0)
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
+}
+#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
new file mode 100644
index 000000000000..8d5fc3ac43da
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -0,0 +1,266 @@
+#ifndef _ASM_POWERPC_PGALLOC_64_H
+#define _ASM_POWERPC_PGALLOC_64_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+
+struct vmemmap_backing {
+ struct vmemmap_backing *list;
+ unsigned long phys;
+ unsigned long virt_addr;
+};
+extern struct vmemmap_backing *vmemmap_list;
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
+#ifndef CONFIG_PPC_64K_PAGES
+
+#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, __pgtable_ptr_val(PUD))
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, __pgtable_ptr_val(pmd));
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ pmd_set(pmd, __pgtable_ptr_val(pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
+{
+ pmd_set(pmd, __pgtable_ptr_val(page_address(pte_page)));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page;
+ pte_t *pte;
+
+ pte = pte_alloc_one_kernel(mm, address);
+ if (!pte)
+ return NULL;
+ page = virt_to_page(pte);
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ return page;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+static inline void pgtable_free(void *table, unsigned index_size)
+{
+ if (!index_size)
+ free_page((unsigned long)table);
+ else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
+}
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+#else /* !CONFIG_SMP */
+static inline void pgtable_free_tlb(struct mmu_gather *tlb,
+ void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif /* CONFIG_SMP */
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
+}
+
+#else /* if CONFIG_PPC_64K_PAGES */
+
+extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
+extern void page_table_free(struct mm_struct *, unsigned long *, int);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+#ifdef CONFIG_SMP
+extern void __tlb_remove_table(void *_table);
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+/* book3s 64 uses a 4-level page table */
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ pgd_set(pgd, __pgtable_ptr_val(pud));
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+}
+#endif
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, __pgtable_ptr_val(pmd));
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ pmd_set(pmd, __pgtable_ptr_val(pte));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ pgtable_t pte_page)
+{
+ pmd_set(pmd, __pgtable_ptr_val(pte_page));
+}
+
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)page_table_alloc(mm, address, 1);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pgtable_t)page_table_alloc(mm, address, 0);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ page_table_free(mm, (unsigned long *)pte, 1);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ page_table_free(mm, (unsigned long *)ptepage, 0);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_free_tlb(tlb, table, 0);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
+#ifndef __PAGETABLE_PUD_FOLDED
+#define __pud_free_tlb(tlb, pud, addr) \
+ pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _ASM_POWERPC_PGALLOC_64_H */
--
2.5.0