* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-16 9:19 ` [PATCH 1/2] " changbin.du
@ 2017-10-17 8:08 ` Anshuman Khandual
2017-10-17 9:16 ` Du, Changbin
2017-10-17 10:20 ` Michal Hocko
` (3 subsequent siblings)
4 siblings, 1 reply; 21+ messages in thread
From: Anshuman Khandual @ 2017-10-17 8:08 UTC (permalink / raw)
To: changbin.du, akpm, corbet, hughd; +Cc: linux-doc, linux-kernel, linux-mm
On 10/16/2017 02:49 PM, changbin.du@intel.com wrote:
> From: Changbin Du <changbin.du@intel.com>
>
> This patch introduced 4 new interfaces to allocate a prepared
> transparent huge page.
> - alloc_transhuge_page_vma
> - alloc_transhuge_page_nodemask
> - alloc_transhuge_page_node
> - alloc_transhuge_page
>
If we are trying to match the HugeTLB helpers, then the format should
be something like alloc_transhugepage_xxx instead of
alloc_transhuge_page_xxx. But I think it's okay.
> The aim is to remove duplicated code and simplify transparent
> huge page allocation. These are similar to alloc_hugepage_xxx
> which are for hugetlbfs pages. This patch does below changes:
> - define alloc_transhuge_page_xxx interfaces
> - apply them to all existing code
> - declare prep_transhuge_page as static since no others use it
> - remove alloc_hugepage_vma definition since it no longer has users
>
> Signed-off-by: Changbin Du <changbin.du@intel.com>
> ---
> include/linux/gfp.h | 4 ----
> include/linux/huge_mm.h | 13 ++++++++++++-
> include/linux/migrate.h | 14 +++++---------
> mm/huge_memory.c | 50 ++++++++++++++++++++++++++++++++++++++++++-------
> mm/khugepaged.c | 11 ++---------
> mm/mempolicy.c | 10 +++-------
> mm/migrate.c | 12 ++++--------
> mm/shmem.c | 6 ++----
> 8 files changed, 71 insertions(+), 49 deletions(-)
>
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index f780718..855c72e 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -507,15 +507,11 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
> extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
> struct vm_area_struct *vma, unsigned long addr,
> int node, bool hugepage);
> -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
> #else
> #define alloc_pages(gfp_mask, order) \
> alloc_pages_node(numa_node_id(), gfp_mask, order)
> #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
> alloc_pages(gfp_mask, order)
> -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> - alloc_pages(gfp_mask, order)
> #endif
> #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
> #define alloc_page_vma(gfp_mask, vma, addr) \
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 14bc21c..1dd2c33 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -130,9 +130,20 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
> unsigned long addr, unsigned long len, unsigned long pgoff,
> unsigned long flags);
>
> -extern void prep_transhuge_page(struct page *page);
> extern void free_transhuge_page(struct page *page);
>
> +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> + struct vm_area_struct *vma, unsigned long addr);
> +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> + int preferred_nid, nodemask_t *nmask);
Wouldn't they require 'extern' here?
> +
> +static inline struct page *alloc_transhuge_page_node(int nid, gfp_t gfp_mask)
> +{
> + return alloc_transhuge_page_nodemask(gfp_mask, nid, NULL);
> +}
> +
> +struct page *alloc_transhuge_page(gfp_t gfp_mask);
> +
> bool can_split_huge_page(struct page *page, int *pextra_pins);
> int split_huge_page_to_list(struct page *page, struct list_head *list);
> static inline int split_huge_page(struct page *page)
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index 643c7ae..70a00f3 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -42,19 +42,15 @@ static inline struct page *new_page_nodemask(struct page *page,
> return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
> preferred_nid, nodemask);
>
> - if (thp_migration_supported() && PageTransHuge(page)) {
> - order = HPAGE_PMD_ORDER;
> - gfp_mask |= GFP_TRANSHUGE;
> - }
> -
> if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> gfp_mask |= __GFP_HIGHMEM;
>
> - new_page = __alloc_pages_nodemask(gfp_mask, order,
> + if (thp_migration_supported() && PageTransHuge(page))
> + return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
> + preferred_nid, nodemask);
> + else
> + return __alloc_pages_nodemask(gfp_mask, order,
> preferred_nid, nodemask);
> -
> - if (new_page && PageTransHuge(page))
> - prep_transhuge_page(new_page);
This makes sense; calling prep_transhuge_page() inside
alloc_transhuge_page_nodemask() itself is better, I guess.
>
> return new_page;
> }
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 269b5df..e267488 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -490,7 +490,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
> return (struct list_head *)&page[2].mapping;
> }
>
> -void prep_transhuge_page(struct page *page)
> +static void prep_transhuge_page(struct page *page)
Right. It won't be used outside the huge page allocation context, and
you have already mentioned that.
> {
> /*
> * we use page->mapping and page->indexlru in second tail page
> @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> }
>
> +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> + struct vm_area_struct *vma, unsigned long addr)
> +{
> + struct page *page;
> +
> + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> + vma, addr, numa_node_id(), true);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
__GFP_COMP and HPAGE_PMD_ORDER are the minimum flags that will be used
for huge page allocation and preparation. Anything else that depends on
the context will be passed in by the caller. Makes sense.
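Just to make the split explicit (a sketch of how I read it; the actual hunk is
further down in the patch): a caller only passes the context-specific flags,
e.g. the mm/mempolicy.c call site becomes

	thp = alloc_transhuge_page_node(node, (GFP_TRANSHUGE | __GFP_THISNODE));
	if (!thp)
		return NULL;

and the helper ORs in __GFP_COMP and picks HPAGE_PMD_ORDER internally.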
> +
> +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> + int preferred_nid, nodemask_t *nmask)
> +{
> + struct page *page;
> +
> + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> + preferred_nid, nmask);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
Same here.
> +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> +{
> + struct page *page;
> +
> + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
You expect the caller to provide __GFP_COMP, why? You are
providing it later anyway.
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-17 8:08 ` Anshuman Khandual
@ 2017-10-17 9:16 ` Du, Changbin
0 siblings, 0 replies; 21+ messages in thread
From: Du, Changbin @ 2017-10-17 9:16 UTC (permalink / raw)
To: Anshuman Khandual
Cc: changbin.du, akpm, corbet, hughd, linux-doc, linux-kernel,
linux-mm
Hi Khandual,
Thanks for your review.
On Tue, Oct 17, 2017 at 01:38:07PM +0530, Anshuman Khandual wrote:
> On 10/16/2017 02:49 PM, changbin.du@intel.com wrote:
> > From: Changbin Du <changbin.du@intel.com>
> >
> > This patch introduced 4 new interfaces to allocate a prepared
> > transparent huge page.
> > - alloc_transhuge_page_vma
> > - alloc_transhuge_page_nodemask
> > - alloc_transhuge_page_node
> > - alloc_transhuge_page
> >
>
> If we are trying to match the HugeTLB helpers, then the format should
> be something like alloc_transhugepage_xxx instead of
> alloc_transhuge_page_xxx. But I think it's okay.
>
The HugeTLB helpers are named like alloc_huge_page, so I think
alloc_transhuge_page matches them. And the existing names already use
the *transhuge_page* style.
> > diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> > index 14bc21c..1dd2c33 100644
> > --- a/include/linux/huge_mm.h
> > +++ b/include/linux/huge_mm.h
> > @@ -130,9 +130,20 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
> > unsigned long addr, unsigned long len, unsigned long pgoff,
> > unsigned long flags);
> >
> > -extern void prep_transhuge_page(struct page *page);
> > extern void free_transhuge_page(struct page *page);
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr);
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask);
>
> Wouldn't they require 'extern' here?
>
Needed or not, function declarations are implicitly 'extern'. I will add it to
align with the existing code.
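Just for clarity, at file scope the two forms are equivalent in C (functions
get external linkage by default), e.g.:

	struct page *alloc_transhuge_page(gfp_t gfp_mask);        /* implicitly extern */
	extern struct page *alloc_transhuge_page(gfp_t gfp_mask); /* explicit, matches the existing style */

so adding 'extern' is purely for consistency.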
> > +
> > +static inline struct page *alloc_transhuge_page_node(int nid, gfp_t gfp_mask)
> > +{
> > + return alloc_transhuge_page_nodemask(gfp_mask, nid, NULL);
> > +}
> > +
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask);
> > +
> > bool can_split_huge_page(struct page *page, int *pextra_pins);
> > int split_huge_page_to_list(struct page *page, struct list_head *list);
> > static inline int split_huge_page(struct page *page)
> > diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> > index 643c7ae..70a00f3 100644
> > --- a/include/linux/migrate.h
> > +++ b/include/linux/migrate.h
> > @@ -42,19 +42,15 @@ static inline struct page *new_page_nodemask(struct page *page,
> > return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
> > preferred_nid, nodemask);
> >
> > - if (thp_migration_supported() && PageTransHuge(page)) {
> > - order = HPAGE_PMD_ORDER;
> > - gfp_mask |= GFP_TRANSHUGE;
> > - }
> > -
> > if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> > gfp_mask |= __GFP_HIGHMEM;
> >
> > - new_page = __alloc_pages_nodemask(gfp_mask, order,
> > + if (thp_migration_supported() && PageTransHuge(page))
> > + return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
> > + preferred_nid, nodemask);
> > + else
> > + return __alloc_pages_nodemask(gfp_mask, order,
> > preferred_nid, nodemask);
> > -
> > - if (new_page && PageTransHuge(page))
> > - prep_transhuge_page(new_page);
>
> This makes sense; calling prep_transhuge_page() inside
> alloc_transhuge_page_nodemask() itself is better, I guess.
>
> >
> > return new_page;
> > }
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 269b5df..e267488 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -490,7 +490,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
> > return (struct list_head *)&page[2].mapping;
> > }
> >
> > -void prep_transhuge_page(struct page *page)
> > +static void prep_transhuge_page(struct page *page)
>
> Right. It won't be used outside the huge page allocation context, and
> you have already mentioned that.
>
> > {
> > /*
> > * we use page->mapping and page->indexlru in second tail page
> > @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> > set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> > }
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr)
> > +{
> > + struct page *page;
> > +
> > + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + vma, addr, numa_node_id(), true);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
>
> __GFP_COMP and HPAGE_PMD_ORDER are the minimum flags that will be used
> for huge page allocation and preparation. Anything else that depends on
> the context will be passed in by the caller. Makes sense.
>
yes, thanks.
> > +
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask)
> > +{
> > + struct page *page;
> > +
> > + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + preferred_nid, nmask);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
>
> Same here.
>
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> > +{
> > + struct page *page;
> > +
> > + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
>
> You expect the caller to provide __GFP_COMP, why? You are
> providing it later anyway.
>
Oops, I forgot to update this line. Will remove it. Thanks for catching it.
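With that dropped, the helper would simply become something like:

	struct page *alloc_transhuge_page(gfp_t gfp_mask)
	{
		struct page *page;

		page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
		if (unlikely(!page))
			return NULL;
		prep_transhuge_page(page);
		return page;
	}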
--
Thanks,
Changbin Du
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-16 9:19 ` [PATCH 1/2] " changbin.du
2017-10-17 8:08 ` Anshuman Khandual
@ 2017-10-17 10:20 ` Michal Hocko
2017-10-17 10:22 ` Michal Hocko
2017-10-18 11:00 ` Du, Changbin
2017-10-17 11:12 ` Kirill A. Shutemov
` (2 subsequent siblings)
4 siblings, 2 replies; 21+ messages in thread
From: Michal Hocko @ 2017-10-17 10:20 UTC (permalink / raw)
To: changbin.du; +Cc: akpm, corbet, hughd, linux-doc, linux-kernel, linux-mm
[CC Kirill]
On Mon 16-10-17 17:19:16, changbin.du@intel.com wrote:
> From: Changbin Du <changbin.du@intel.com>
>
> This patch introduced 4 new interfaces to allocate a prepared
> transparent huge page.
> - alloc_transhuge_page_vma
> - alloc_transhuge_page_nodemask
> - alloc_transhuge_page_node
> - alloc_transhuge_page
>
> The aim is to remove duplicated code and simplify transparent
> huge page allocation. These are similar to alloc_hugepage_xxx
> which are for hugetlbfs pages. This patch does below changes:
> - define alloc_transhuge_page_xxx interfaces
> - apply them to all existing code
> - declare prep_transhuge_page as static since no others use it
> - remove alloc_hugepage_vma definition since it no longer has users
So what exactly is the advantage of the new API? The diffstat doesn't
sound very convincing to me.
> Signed-off-by: Changbin Du <changbin.du@intel.com>
> ---
> include/linux/gfp.h | 4 ----
> include/linux/huge_mm.h | 13 ++++++++++++-
> include/linux/migrate.h | 14 +++++---------
> mm/huge_memory.c | 50 ++++++++++++++++++++++++++++++++++++++++++-------
> mm/khugepaged.c | 11 ++---------
> mm/mempolicy.c | 10 +++-------
> mm/migrate.c | 12 ++++--------
> mm/shmem.c | 6 ++----
> 8 files changed, 71 insertions(+), 49 deletions(-)
>
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index f780718..855c72e 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -507,15 +507,11 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
> extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
> struct vm_area_struct *vma, unsigned long addr,
> int node, bool hugepage);
> -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
> #else
> #define alloc_pages(gfp_mask, order) \
> alloc_pages_node(numa_node_id(), gfp_mask, order)
> #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
> alloc_pages(gfp_mask, order)
> -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> - alloc_pages(gfp_mask, order)
> #endif
> #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
> #define alloc_page_vma(gfp_mask, vma, addr) \
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 14bc21c..1dd2c33 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -130,9 +130,20 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
> unsigned long addr, unsigned long len, unsigned long pgoff,
> unsigned long flags);
>
> -extern void prep_transhuge_page(struct page *page);
> extern void free_transhuge_page(struct page *page);
>
> +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> + struct vm_area_struct *vma, unsigned long addr);
> +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> + int preferred_nid, nodemask_t *nmask);
> +
> +static inline struct page *alloc_transhuge_page_node(int nid, gfp_t gfp_mask)
> +{
> + return alloc_transhuge_page_nodemask(gfp_mask, nid, NULL);
> +}
> +
> +struct page *alloc_transhuge_page(gfp_t gfp_mask);
> +
> bool can_split_huge_page(struct page *page, int *pextra_pins);
> int split_huge_page_to_list(struct page *page, struct list_head *list);
> static inline int split_huge_page(struct page *page)
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index 643c7ae..70a00f3 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -42,19 +42,15 @@ static inline struct page *new_page_nodemask(struct page *page,
> return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
> preferred_nid, nodemask);
>
> - if (thp_migration_supported() && PageTransHuge(page)) {
> - order = HPAGE_PMD_ORDER;
> - gfp_mask |= GFP_TRANSHUGE;
> - }
> -
> if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> gfp_mask |= __GFP_HIGHMEM;
>
> - new_page = __alloc_pages_nodemask(gfp_mask, order,
> + if (thp_migration_supported() && PageTransHuge(page))
> + return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
> + preferred_nid, nodemask);
> + else
> + return __alloc_pages_nodemask(gfp_mask, order,
> preferred_nid, nodemask);
> -
> - if (new_page && PageTransHuge(page))
> - prep_transhuge_page(new_page);
>
> return new_page;
> }
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 269b5df..e267488 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -490,7 +490,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
> return (struct list_head *)&page[2].mapping;
> }
>
> -void prep_transhuge_page(struct page *page)
> +static void prep_transhuge_page(struct page *page)
> {
> /*
> * we use page->mapping and page->indexlru in second tail page
> @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> }
>
> +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> + struct vm_area_struct *vma, unsigned long addr)
> +{
> + struct page *page;
> +
> + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> + vma, addr, numa_node_id(), true);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
> +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> + int preferred_nid, nodemask_t *nmask)
> +{
> + struct page *page;
> +
> + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> + preferred_nid, nmask);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
> +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> +{
> + struct page *page;
> +
> + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
> +
> + page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
> unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
> loff_t off, unsigned long flags, unsigned long size)
> {
> @@ -719,12 +758,11 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> return ret;
> }
> gfp = alloc_hugepage_direct_gfpmask(vma);
> - page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> + page = alloc_transhuge_page_vma(gfp, vma, haddr);
> if (unlikely(!page)) {
> count_vm_event(THP_FAULT_FALLBACK);
> return VM_FAULT_FALLBACK;
> }
> - prep_transhuge_page(page);
> return __do_huge_pmd_anonymous_page(vmf, page, gfp);
> }
>
> @@ -1288,13 +1326,11 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
> if (transparent_hugepage_enabled(vma) &&
> !transparent_hugepage_debug_cow()) {
> huge_gfp = alloc_hugepage_direct_gfpmask(vma);
> - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
> + new_page = alloc_transhuge_page_vma(huge_gfp, vma, haddr);
> } else
> new_page = NULL;
>
> - if (likely(new_page)) {
> - prep_transhuge_page(new_page);
> - } else {
> + if (unlikely(!new_page)) {
> if (!page) {
> split_huge_pmd(vma, vmf->pmd, vmf->address);
> ret |= VM_FAULT_FALLBACK;
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index c01f177..d17a694 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -745,14 +745,13 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
> {
> VM_BUG_ON_PAGE(*hpage, *hpage);
>
> - *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
> + *hpage = alloc_transhuge_page_node(node, gfp);
> if (unlikely(!*hpage)) {
> count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
> *hpage = ERR_PTR(-ENOMEM);
> return NULL;
> }
>
> - prep_transhuge_page(*hpage);
> count_vm_event(THP_COLLAPSE_ALLOC);
> return *hpage;
> }
> @@ -764,13 +763,7 @@ static int khugepaged_find_target_node(void)
>
> static inline struct page *alloc_khugepaged_hugepage(void)
> {
> - struct page *page;
> -
> - page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
> - HPAGE_PMD_ORDER);
> - if (page)
> - prep_transhuge_page(page);
> - return page;
> + return alloc_transhuge_page(alloc_hugepage_khugepaged_gfpmask());
> }
>
> static struct page *khugepaged_alloc_hugepage(bool *wait)
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index a2af6d5..aa24285 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -949,12 +949,10 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
> else if (thp_migration_supported() && PageTransHuge(page)) {
> struct page *thp;
>
> - thp = alloc_pages_node(node,
> - (GFP_TRANSHUGE | __GFP_THISNODE),
> - HPAGE_PMD_ORDER);
> + thp = alloc_transhuge_page_node(node,
> + (GFP_TRANSHUGE | __GFP_THISNODE));
> if (!thp)
> return NULL;
> - prep_transhuge_page(thp);
> return thp;
> } else
> return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
> @@ -1125,11 +1123,9 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
> } else if (thp_migration_supported() && PageTransHuge(page)) {
> struct page *thp;
>
> - thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
> - HPAGE_PMD_ORDER);
> + thp = alloc_transhuge_page_vma(GFP_TRANSHUGE, vma, address);
> if (!thp)
> return NULL;
> - prep_transhuge_page(thp);
> return thp;
> }
> /*
> diff --git a/mm/migrate.c b/mm/migrate.c
> index e00814c..7f0486f 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1472,12 +1472,10 @@ static struct page *new_page_node(struct page *p, unsigned long private,
> else if (thp_migration_supported() && PageTransHuge(p)) {
> struct page *thp;
>
> - thp = alloc_pages_node(pm->node,
> - (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
> - HPAGE_PMD_ORDER);
> + thp = alloc_transhuge_page_node(pm->node,
> + (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM);
> if (!thp)
> return NULL;
> - prep_transhuge_page(thp);
> return thp;
> } else
> return __alloc_pages_node(pm->node,
> @@ -2017,12 +2015,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
> if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
> goto out_dropref;
>
> - new_page = alloc_pages_node(node,
> - (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
> - HPAGE_PMD_ORDER);
> + new_page = alloc_transhuge_page_node(node,
> + (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE));
> if (!new_page)
> goto out_fail;
> - prep_transhuge_page(new_page);
>
> isolated = numamigrate_isolate_page(pgdat, page);
> if (!isolated) {
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 07a1d22..52468f7 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1444,11 +1444,9 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
> rcu_read_unlock();
>
> shmem_pseudo_vma_init(&pvma, info, hindex);
> - page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
> - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
> + gfp |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
> + page = alloc_transhuge_page_vma(gfp, &pvma, 0);
> shmem_pseudo_vma_destroy(&pvma);
> - if (page)
> - prep_transhuge_page(page);
> return page;
> }
>
> --
> 2.7.4
>
--
Michal Hocko
SUSE Labs
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-17 10:20 ` Michal Hocko
@ 2017-10-17 10:22 ` Michal Hocko
2017-10-18 11:00 ` Du, Changbin
1 sibling, 0 replies; 21+ messages in thread
From: Michal Hocko @ 2017-10-17 10:22 UTC (permalink / raw)
To: changbin.du
Cc: akpm, corbet, hughd, linux-doc, linux-kernel, linux-mm,
Kirill A. Shutemov
On Tue 17-10-17 12:20:52, Michal Hocko wrote:
> [CC Kirill]
now for real
> On Mon 16-10-17 17:19:16, changbin.du@intel.com wrote:
> > From: Changbin Du <changbin.du@intel.com>
> >
> > This patch introduced 4 new interfaces to allocate a prepared
> > transparent huge page.
> > - alloc_transhuge_page_vma
> > - alloc_transhuge_page_nodemask
> > - alloc_transhuge_page_node
> > - alloc_transhuge_page
> >
> > The aim is to remove duplicated code and simplify transparent
> > huge page allocation. These are similar to alloc_hugepage_xxx
> > which are for hugetlbfs pages. This patch does below changes:
> > - define alloc_transhuge_page_xxx interfaces
> > - apply them to all existing code
> > - declare prep_transhuge_page as static since no others use it
> > - remove alloc_hugepage_vma definition since it no longer has users
>
> So what exactly is the advantage of the new API? The diffstat doesn't
> sound very convincing to me.
>
> > Signed-off-by: Changbin Du <changbin.du@intel.com>
> > ---
> > include/linux/gfp.h | 4 ----
> > include/linux/huge_mm.h | 13 ++++++++++++-
> > include/linux/migrate.h | 14 +++++---------
> > mm/huge_memory.c | 50 ++++++++++++++++++++++++++++++++++++++++++-------
> > mm/khugepaged.c | 11 ++---------
> > mm/mempolicy.c | 10 +++-------
> > mm/migrate.c | 12 ++++--------
> > mm/shmem.c | 6 ++----
> > 8 files changed, 71 insertions(+), 49 deletions(-)
> >
> > diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> > index f780718..855c72e 100644
> > --- a/include/linux/gfp.h
> > +++ b/include/linux/gfp.h
> > @@ -507,15 +507,11 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
> > extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
> > struct vm_area_struct *vma, unsigned long addr,
> > int node, bool hugepage);
> > -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> > - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
> > #else
> > #define alloc_pages(gfp_mask, order) \
> > alloc_pages_node(numa_node_id(), gfp_mask, order)
> > #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
> > alloc_pages(gfp_mask, order)
> > -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> > - alloc_pages(gfp_mask, order)
> > #endif
> > #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
> > #define alloc_page_vma(gfp_mask, vma, addr) \
> > diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> > index 14bc21c..1dd2c33 100644
> > --- a/include/linux/huge_mm.h
> > +++ b/include/linux/huge_mm.h
> > @@ -130,9 +130,20 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
> > unsigned long addr, unsigned long len, unsigned long pgoff,
> > unsigned long flags);
> >
> > -extern void prep_transhuge_page(struct page *page);
> > extern void free_transhuge_page(struct page *page);
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr);
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask);
> > +
> > +static inline struct page *alloc_transhuge_page_node(int nid, gfp_t gfp_mask)
> > +{
> > + return alloc_transhuge_page_nodemask(gfp_mask, nid, NULL);
> > +}
> > +
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask);
> > +
> > bool can_split_huge_page(struct page *page, int *pextra_pins);
> > int split_huge_page_to_list(struct page *page, struct list_head *list);
> > static inline int split_huge_page(struct page *page)
> > diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> > index 643c7ae..70a00f3 100644
> > --- a/include/linux/migrate.h
> > +++ b/include/linux/migrate.h
> > @@ -42,19 +42,15 @@ static inline struct page *new_page_nodemask(struct page *page,
> > return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
> > preferred_nid, nodemask);
> >
> > - if (thp_migration_supported() && PageTransHuge(page)) {
> > - order = HPAGE_PMD_ORDER;
> > - gfp_mask |= GFP_TRANSHUGE;
> > - }
> > -
> > if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> > gfp_mask |= __GFP_HIGHMEM;
> >
> > - new_page = __alloc_pages_nodemask(gfp_mask, order,
> > + if (thp_migration_supported() && PageTransHuge(page))
> > + return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
> > + preferred_nid, nodemask);
> > + else
> > + return __alloc_pages_nodemask(gfp_mask, order,
> > preferred_nid, nodemask);
> > -
> > - if (new_page && PageTransHuge(page))
> > - prep_transhuge_page(new_page);
> >
> > return new_page;
> > }
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 269b5df..e267488 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -490,7 +490,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
> > return (struct list_head *)&page[2].mapping;
> > }
> >
> > -void prep_transhuge_page(struct page *page)
> > +static void prep_transhuge_page(struct page *page)
> > {
> > /*
> > * we use page->mapping and page->indexlru in second tail page
> > @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> > set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> > }
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr)
> > +{
> > + struct page *page;
> > +
> > + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + vma, addr, numa_node_id(), true);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask)
> > +{
> > + struct page *page;
> > +
> > + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + preferred_nid, nmask);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> > +{
> > + struct page *page;
> > +
> > + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
> > +
> > + page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
> > loff_t off, unsigned long flags, unsigned long size)
> > {
> > @@ -719,12 +758,11 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> > return ret;
> > }
> > gfp = alloc_hugepage_direct_gfpmask(vma);
> > - page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> > + page = alloc_transhuge_page_vma(gfp, vma, haddr);
> > if (unlikely(!page)) {
> > count_vm_event(THP_FAULT_FALLBACK);
> > return VM_FAULT_FALLBACK;
> > }
> > - prep_transhuge_page(page);
> > return __do_huge_pmd_anonymous_page(vmf, page, gfp);
> > }
> >
> > @@ -1288,13 +1326,11 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
> > if (transparent_hugepage_enabled(vma) &&
> > !transparent_hugepage_debug_cow()) {
> > huge_gfp = alloc_hugepage_direct_gfpmask(vma);
> > - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
> > + new_page = alloc_transhuge_page_vma(huge_gfp, vma, haddr);
> > } else
> > new_page = NULL;
> >
> > - if (likely(new_page)) {
> > - prep_transhuge_page(new_page);
> > - } else {
> > + if (unlikely(!new_page)) {
> > if (!page) {
> > split_huge_pmd(vma, vmf->pmd, vmf->address);
> > ret |= VM_FAULT_FALLBACK;
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index c01f177..d17a694 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -745,14 +745,13 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
> > {
> > VM_BUG_ON_PAGE(*hpage, *hpage);
> >
> > - *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
> > + *hpage = alloc_transhuge_page_node(node, gfp);
> > if (unlikely(!*hpage)) {
> > count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
> > *hpage = ERR_PTR(-ENOMEM);
> > return NULL;
> > }
> >
> > - prep_transhuge_page(*hpage);
> > count_vm_event(THP_COLLAPSE_ALLOC);
> > return *hpage;
> > }
> > @@ -764,13 +763,7 @@ static int khugepaged_find_target_node(void)
> >
> > static inline struct page *alloc_khugepaged_hugepage(void)
> > {
> > - struct page *page;
> > -
> > - page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
> > - HPAGE_PMD_ORDER);
> > - if (page)
> > - prep_transhuge_page(page);
> > - return page;
> > + return alloc_transhuge_page(alloc_hugepage_khugepaged_gfpmask());
> > }
> >
> > static struct page *khugepaged_alloc_hugepage(bool *wait)
> > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > index a2af6d5..aa24285 100644
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> > @@ -949,12 +949,10 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
> > else if (thp_migration_supported() && PageTransHuge(page)) {
> > struct page *thp;
> >
> > - thp = alloc_pages_node(node,
> > - (GFP_TRANSHUGE | __GFP_THISNODE),
> > - HPAGE_PMD_ORDER);
> > + thp = alloc_transhuge_page_node(node,
> > + (GFP_TRANSHUGE | __GFP_THISNODE));
> > if (!thp)
> > return NULL;
> > - prep_transhuge_page(thp);
> > return thp;
> > } else
> > return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
> > @@ -1125,11 +1123,9 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
> > } else if (thp_migration_supported() && PageTransHuge(page)) {
> > struct page *thp;
> >
> > - thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
> > - HPAGE_PMD_ORDER);
> > + thp = alloc_transhuge_page_vma(GFP_TRANSHUGE, vma, address);
> > if (!thp)
> > return NULL;
> > - prep_transhuge_page(thp);
> > return thp;
> > }
> > /*
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index e00814c..7f0486f 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -1472,12 +1472,10 @@ static struct page *new_page_node(struct page *p, unsigned long private,
> > else if (thp_migration_supported() && PageTransHuge(p)) {
> > struct page *thp;
> >
> > - thp = alloc_pages_node(pm->node,
> > - (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
> > - HPAGE_PMD_ORDER);
> > + thp = alloc_transhuge_page_node(pm->node,
> > + (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM);
> > if (!thp)
> > return NULL;
> > - prep_transhuge_page(thp);
> > return thp;
> > } else
> > return __alloc_pages_node(pm->node,
> > @@ -2017,12 +2015,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
> > if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
> > goto out_dropref;
> >
> > - new_page = alloc_pages_node(node,
> > - (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
> > - HPAGE_PMD_ORDER);
> > + new_page = alloc_transhuge_page_node(node,
> > + (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE));
> > if (!new_page)
> > goto out_fail;
> > - prep_transhuge_page(new_page);
> >
> > isolated = numamigrate_isolate_page(pgdat, page);
> > if (!isolated) {
> > diff --git a/mm/shmem.c b/mm/shmem.c
> > index 07a1d22..52468f7 100644
> > --- a/mm/shmem.c
> > +++ b/mm/shmem.c
> > @@ -1444,11 +1444,9 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
> > rcu_read_unlock();
> >
> > shmem_pseudo_vma_init(&pvma, info, hindex);
> > - page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
> > - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
> > + gfp |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
> > + page = alloc_transhuge_page_vma(gfp, &pvma, 0);
> > shmem_pseudo_vma_destroy(&pvma);
> > - if (page)
> > - prep_transhuge_page(page);
> > return page;
> > }
> >
> > --
> > 2.7.4
> >
>
> --
> Michal Hocko
> SUSE Labs
--
Michal Hocko
SUSE Labs
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-17 10:20 ` Michal Hocko
2017-10-17 10:22 ` Michal Hocko
@ 2017-10-18 11:00 ` Du, Changbin
2017-10-19 12:49 ` Michal Hocko
1 sibling, 1 reply; 21+ messages in thread
From: Du, Changbin @ 2017-10-18 11:00 UTC (permalink / raw)
To: Michal Hocko
Cc: changbin.du, akpm, corbet, hughd, linux-doc, linux-kernel,
linux-mm
Hi Hocko,
On Tue, Oct 17, 2017 at 12:20:52PM +0200, Michal Hocko wrote:
> [CC Kirill]
>
> On Mon 16-10-17 17:19:16, changbin.du@intel.com wrote:
> > From: Changbin Du <changbin.du@intel.com>
> >
> > This patch introduced 4 new interfaces to allocate a prepared
> > transparent huge page.
> > - alloc_transhuge_page_vma
> > - alloc_transhuge_page_nodemask
> > - alloc_transhuge_page_node
> > - alloc_transhuge_page
> >
> > The aim is to remove duplicated code and simplify transparent
> > huge page allocation. These are similar to alloc_hugepage_xxx
> > which are for hugetlbfs pages. This patch does below changes:
> > - define alloc_transhuge_page_xxx interfaces
> > - apply them to all existing code
> > - declare prep_transhuge_page as static since no others use it
> > - remove alloc_hugepage_vma definition since it no longer has users
>
> So what exactly is the advantage of the new API? The diffstat doesn't
> sound very convincing to me.
>
The caller only needs one step to allocate a THP. Several lines of code are removed
on the caller side with this change, so it is a little more convenient.
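For example, the do_huge_pmd_anonymous_page() hunk in this patch turns

	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);

into

	page = alloc_transhuge_page_vma(gfp, vma, haddr);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}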
> > Signed-off-by: Changbin Du <changbin.du@intel.com>
> > ---
> > include/linux/gfp.h | 4 ----
> > include/linux/huge_mm.h | 13 ++++++++++++-
> > include/linux/migrate.h | 14 +++++---------
> > mm/huge_memory.c | 50 ++++++++++++++++++++++++++++++++++++++++++-------
> > mm/khugepaged.c | 11 ++---------
> > mm/mempolicy.c | 10 +++-------
> > mm/migrate.c | 12 ++++--------
> > mm/shmem.c | 6 ++----
> > 8 files changed, 71 insertions(+), 49 deletions(-)
> >
> > diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> > index f780718..855c72e 100644
> > --- a/include/linux/gfp.h
> > +++ b/include/linux/gfp.h
> > @@ -507,15 +507,11 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
> > extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
> > struct vm_area_struct *vma, unsigned long addr,
> > int node, bool hugepage);
> > -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> > - alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
> > #else
> > #define alloc_pages(gfp_mask, order) \
> > alloc_pages_node(numa_node_id(), gfp_mask, order)
> > #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
> > alloc_pages(gfp_mask, order)
> > -#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
> > - alloc_pages(gfp_mask, order)
> > #endif
> > #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
> > #define alloc_page_vma(gfp_mask, vma, addr) \
> > diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> > index 14bc21c..1dd2c33 100644
> > --- a/include/linux/huge_mm.h
> > +++ b/include/linux/huge_mm.h
> > @@ -130,9 +130,20 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
> > unsigned long addr, unsigned long len, unsigned long pgoff,
> > unsigned long flags);
> >
> > -extern void prep_transhuge_page(struct page *page);
> > extern void free_transhuge_page(struct page *page);
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr);
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask);
> > +
> > +static inline struct page *alloc_transhuge_page_node(int nid, gfp_t gfp_mask)
> > +{
> > + return alloc_transhuge_page_nodemask(gfp_mask, nid, NULL);
> > +}
> > +
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask);
> > +
> > bool can_split_huge_page(struct page *page, int *pextra_pins);
> > int split_huge_page_to_list(struct page *page, struct list_head *list);
> > static inline int split_huge_page(struct page *page)
> > diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> > index 643c7ae..70a00f3 100644
> > --- a/include/linux/migrate.h
> > +++ b/include/linux/migrate.h
> > @@ -42,19 +42,15 @@ static inline struct page *new_page_nodemask(struct page *page,
> > return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
> > preferred_nid, nodemask);
> >
> > - if (thp_migration_supported() && PageTransHuge(page)) {
> > - order = HPAGE_PMD_ORDER;
> > - gfp_mask |= GFP_TRANSHUGE;
> > - }
> > -
> > if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
> > gfp_mask |= __GFP_HIGHMEM;
> >
> > - new_page = __alloc_pages_nodemask(gfp_mask, order,
> > + if (thp_migration_supported() && PageTransHuge(page))
> > + return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
> > + preferred_nid, nodemask);
> > + else
> > + return __alloc_pages_nodemask(gfp_mask, order,
> > preferred_nid, nodemask);
> > -
> > - if (new_page && PageTransHuge(page))
> > - prep_transhuge_page(new_page);
> >
> > return new_page;
> > }
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 269b5df..e267488 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -490,7 +490,7 @@ static inline struct list_head *page_deferred_list(struct page *page)
> > return (struct list_head *)&page[2].mapping;
> > }
> >
> > -void prep_transhuge_page(struct page *page)
> > +static void prep_transhuge_page(struct page *page)
> > {
> > /*
> > * we use page->mapping and page->indexlru in second tail page
> > @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> > set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> > }
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr)
> > +{
> > + struct page *page;
> > +
> > + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + vma, addr, numa_node_id(), true);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask)
> > +{
> > + struct page *page;
> > +
> > + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + preferred_nid, nmask);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> > +{
> > + struct page *page;
> > +
> > + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
> > +
> > + page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
> > loff_t off, unsigned long flags, unsigned long size)
> > {
> > @@ -719,12 +758,11 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> > return ret;
> > }
> > gfp = alloc_hugepage_direct_gfpmask(vma);
> > - page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> > + page = alloc_transhuge_page_vma(gfp, vma, haddr);
> > if (unlikely(!page)) {
> > count_vm_event(THP_FAULT_FALLBACK);
> > return VM_FAULT_FALLBACK;
> > }
> > - prep_transhuge_page(page);
> > return __do_huge_pmd_anonymous_page(vmf, page, gfp);
> > }
> >
> > @@ -1288,13 +1326,11 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
> > if (transparent_hugepage_enabled(vma) &&
> > !transparent_hugepage_debug_cow()) {
> > huge_gfp = alloc_hugepage_direct_gfpmask(vma);
> > - new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
> > + new_page = alloc_transhuge_page_vma(huge_gfp, vma, haddr);
> > } else
> > new_page = NULL;
> >
> > - if (likely(new_page)) {
> > - prep_transhuge_page(new_page);
> > - } else {
> > + if (unlikely(!new_page)) {
> > if (!page) {
> > split_huge_pmd(vma, vmf->pmd, vmf->address);
> > ret |= VM_FAULT_FALLBACK;
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index c01f177..d17a694 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -745,14 +745,13 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
> > {
> > VM_BUG_ON_PAGE(*hpage, *hpage);
> >
> > - *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
> > + *hpage = alloc_transhuge_page_node(node, gfp);
> > if (unlikely(!*hpage)) {
> > count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
> > *hpage = ERR_PTR(-ENOMEM);
> > return NULL;
> > }
> >
> > - prep_transhuge_page(*hpage);
> > count_vm_event(THP_COLLAPSE_ALLOC);
> > return *hpage;
> > }
> > @@ -764,13 +763,7 @@ static int khugepaged_find_target_node(void)
> >
> > static inline struct page *alloc_khugepaged_hugepage(void)
> > {
> > - struct page *page;
> > -
> > - page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
> > - HPAGE_PMD_ORDER);
> > - if (page)
> > - prep_transhuge_page(page);
> > - return page;
> > + return alloc_transhuge_page(alloc_hugepage_khugepaged_gfpmask());
> > }
> >
> > static struct page *khugepaged_alloc_hugepage(bool *wait)
> > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > index a2af6d5..aa24285 100644
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> > @@ -949,12 +949,10 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x
> > else if (thp_migration_supported() && PageTransHuge(page)) {
> > struct page *thp;
> >
> > - thp = alloc_pages_node(node,
> > - (GFP_TRANSHUGE | __GFP_THISNODE),
> > - HPAGE_PMD_ORDER);
> > + thp = alloc_transhuge_page_node(node,
> > + (GFP_TRANSHUGE | __GFP_THISNODE));
> > if (!thp)
> > return NULL;
> > - prep_transhuge_page(thp);
> > return thp;
> > } else
> > return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
> > @@ -1125,11 +1123,9 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
> > } else if (thp_migration_supported() && PageTransHuge(page)) {
> > struct page *thp;
> >
> > - thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
> > - HPAGE_PMD_ORDER);
> > + thp = alloc_transhuge_page_vma(GFP_TRANSHUGE, vma, address);
> > if (!thp)
> > return NULL;
> > - prep_transhuge_page(thp);
> > return thp;
> > }
> > /*
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index e00814c..7f0486f 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -1472,12 +1472,10 @@ static struct page *new_page_node(struct page *p, unsigned long private,
> > else if (thp_migration_supported() && PageTransHuge(p)) {
> > struct page *thp;
> >
> > - thp = alloc_pages_node(pm->node,
> > - (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
> > - HPAGE_PMD_ORDER);
> > + thp = alloc_transhuge_page_node(pm->node,
> > + (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM);
> > if (!thp)
> > return NULL;
> > - prep_transhuge_page(thp);
> > return thp;
> > } else
> > return __alloc_pages_node(pm->node,
> > @@ -2017,12 +2015,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
> > if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
> > goto out_dropref;
> >
> > - new_page = alloc_pages_node(node,
> > - (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
> > - HPAGE_PMD_ORDER);
> > + new_page = alloc_transhuge_page_node(node,
> > + (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE));
> > if (!new_page)
> > goto out_fail;
> > - prep_transhuge_page(new_page);
> >
> > isolated = numamigrate_isolate_page(pgdat, page);
> > if (!isolated) {
> > diff --git a/mm/shmem.c b/mm/shmem.c
> > index 07a1d22..52468f7 100644
> > --- a/mm/shmem.c
> > +++ b/mm/shmem.c
> > @@ -1444,11 +1444,9 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
> > rcu_read_unlock();
> >
> > shmem_pseudo_vma_init(&pvma, info, hindex);
> > - page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
> > - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
> > + gfp |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
> > + page = alloc_transhuge_page_vma(gfp, &pvma, 0);
> > shmem_pseudo_vma_destroy(&pvma);
> > - if (page)
> > - prep_transhuge_page(page);
> > return page;
> > }
> >
> > --
> > 2.7.4
> >
>
> --
> Michal Hocko
> SUSE Labs
--
Thanks,
Changbin Du
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-18 11:00 ` Du, Changbin
@ 2017-10-19 12:49 ` Michal Hocko
2017-10-20 8:31 ` Du, Changbin
0 siblings, 1 reply; 21+ messages in thread
From: Michal Hocko @ 2017-10-19 12:49 UTC (permalink / raw)
To: Du, Changbin; +Cc: akpm, corbet, hughd, linux-doc, linux-kernel, linux-mm
On Wed 18-10-17 19:00:26, Du, Changbin wrote:
> Hi Hocko,
>
> On Tue, Oct 17, 2017 at 12:20:52PM +0200, Michal Hocko wrote:
> > [CC Kirill]
> >
> > On Mon 16-10-17 17:19:16, changbin.du@intel.com wrote:
> > > From: Changbin Du <changbin.du@intel.com>
> > >
> > > This patch introduced 4 new interfaces to allocate a prepared
> > > transparent huge page.
> > > - alloc_transhuge_page_vma
> > > - alloc_transhuge_page_nodemask
> > > - alloc_transhuge_page_node
> > > - alloc_transhuge_page
> > >
> > > The aim is to remove duplicated code and simplify transparent
> > > huge page allocation. These are similar to alloc_hugepage_xxx
> > > which are for hugetlbfs pages. This patch does below changes:
> > > - define alloc_transhuge_page_xxx interfaces
> > > - apply them to all existing code
> > > - declare prep_transhuge_page as static since no others use it
> > > - remove alloc_hugepage_vma definition since it no longer has users
> >
> > So what exactly is the advantage of the new API? The diffstat doesn't
> > sound very convincing to me.
> >
> > The caller only needs one step to allocate a THP. Several lines of code are removed
> > on the caller side with this change, so it is a little more convenient.
Yeah, but the overall result is more code. So I am not really convinced.
--
Michal Hocko
SUSE Labs
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-19 12:49 ` Michal Hocko
@ 2017-10-20 8:31 ` Du, Changbin
0 siblings, 0 replies; 21+ messages in thread
From: Du, Changbin @ 2017-10-20 8:31 UTC (permalink / raw)
To: Michal Hocko
Cc: Du, Changbin, akpm, corbet, hughd, linux-doc, linux-kernel,
linux-mm
Hi Hocko,
On Thu, Oct 19, 2017 at 02:49:31PM +0200, Michal Hocko wrote:
> On Wed 18-10-17 19:00:26, Du, Changbin wrote:
> > Hi Hocko,
> >
> > On Tue, Oct 17, 2017 at 12:20:52PM +0200, Michal Hocko wrote:
> > > [CC Kirill]
> > >
> > > On Mon 16-10-17 17:19:16, changbin.du@intel.com wrote:
> > > > From: Changbin Du <changbin.du@intel.com>
> > > >
> > > > This patch introduced 4 new interfaces to allocate a prepared
> > > > transparent huge page.
> > > > - alloc_transhuge_page_vma
> > > > - alloc_transhuge_page_nodemask
> > > > - alloc_transhuge_page_node
> > > > - alloc_transhuge_page
> > > >
> > > > The aim is to remove duplicated code and simplify transparent
> > > > huge page allocation. These are similar to alloc_hugepage_xxx
> > > > which are for hugetlbfs pages. This patch does below changes:
> > > > - define alloc_transhuge_page_xxx interfaces
> > > > - apply them to all existing code
> > > > - declare prep_transhuge_page as static since no others use it
> > > > - remove alloc_hugepage_vma definition since it no longer has users
> > >
> > > So what exactly is the advantage of the new API? The diffstat doesn't
> > > sound very convincing to me.
> > >
> > The caller only needs one step to allocate a THP. Several lines of code are removed
> > on the caller side with this change, so it is a little more convenient.
>
> Yeah, but the overall result is more code. So I am not really convinced.
Yes, but some of the code is just there to make the compiler happy (declarations). These are
simple lightweight wrappers, the same as other functions in the kernel. At least the code
readability is improved: the two-step allocation is merged into one, so the
duplicated logic is removed.
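E.g. in mm/mempolicy.c the open-coded sequence

	thp = alloc_pages_node(node, (GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
	if (!thp)
		return NULL;
	prep_transhuge_page(thp);
	return thp;

is reduced to

	thp = alloc_transhuge_page_node(node, (GFP_TRANSHUGE | __GFP_THISNODE));
	if (!thp)
		return NULL;
	return thp;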
> --
> Michal Hocko
> SUSE Labs
--
Thanks,
Changbin Du
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-16 9:19 ` [PATCH 1/2] " changbin.du
2017-10-17 8:08 ` Anshuman Khandual
2017-10-17 10:20 ` Michal Hocko
@ 2017-10-17 11:12 ` Kirill A. Shutemov
2017-10-18 11:01 ` Du, Changbin
2017-10-18 15:54 ` kbuild test robot
2017-10-18 16:01 ` kbuild test robot
4 siblings, 1 reply; 21+ messages in thread
From: Kirill A. Shutemov @ 2017-10-17 11:12 UTC (permalink / raw)
To: changbin.du; +Cc: akpm, corbet, hughd, linux-doc, linux-kernel, linux-mm
On Mon, Oct 16, 2017 at 05:19:16PM +0800, changbin.du@intel.com wrote:
> @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> }
>
> +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> + struct vm_area_struct *vma, unsigned long addr)
> +{
> + struct page *page;
> +
> + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> + vma, addr, numa_node_id(), true);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
> +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> + int preferred_nid, nodemask_t *nmask)
> +{
> + struct page *page;
> +
> + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> + preferred_nid, nmask);
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
> +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> +{
> + struct page *page;
> +
> + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
Why do you check for __GFP_COMP only in this helper?
> + page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
And still apply __GFP_COMP anyway?
> + if (unlikely(!page))
> + return NULL;
> + prep_transhuge_page(page);
> + return page;
> +}
> +
> unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
> loff_t off, unsigned long flags, unsigned long size)
> {
--
Kirill A. Shutemov
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-17 11:12 ` Kirill A. Shutemov
@ 2017-10-18 11:01 ` Du, Changbin
0 siblings, 0 replies; 21+ messages in thread
From: Du, Changbin @ 2017-10-18 11:01 UTC (permalink / raw)
To: Kirill A. Shutemov
Cc: changbin.du, akpm, corbet, hughd, linux-doc, linux-kernel,
linux-mm
On Tue, Oct 17, 2017 at 02:12:46PM +0300, Kirill A. Shutemov wrote:
> On Mon, Oct 16, 2017 at 05:19:16PM +0800, changbin.du@intel.com wrote:
> > @@ -501,6 +501,45 @@ void prep_transhuge_page(struct page *page)
> > set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
> > }
> >
> > +struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
> > + struct vm_area_struct *vma, unsigned long addr)
> > +{
> > + struct page *page;
> > +
> > + page = alloc_pages_vma(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + vma, addr, numa_node_id(), true);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > +struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
> > + int preferred_nid, nodemask_t *nmask)
> > +{
> > + struct page *page;
> > +
> > + page = __alloc_pages_nodemask(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER,
> > + preferred_nid, nmask);
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > +struct page *alloc_transhuge_page(gfp_t gfp_mask)
> > +{
> > + struct page *page;
> > +
> > + VM_BUG_ON(!(gfp_mask & __GFP_COMP));
>
> Why do you check for __GFP_COMP only in this helper?
>
> > + page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
>
> And still apply __GFP_COMP anyway?
>
This is a mistake; it will be removed. Thanks.
> > + if (unlikely(!page))
> > + return NULL;
> > + prep_transhuge_page(page);
> > + return page;
> > +}
> > +
> > unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
> > loff_t off, unsigned long flags, unsigned long size)
> > {
>
> --
> Kirill A. Shutemov
--
Thanks,
Changbin Du
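[Editor's note: a minimal sketch of how alloc_transhuge_page() might look once the
stray VM_BUG_ON() is dropped, assuming __GFP_COMP keeps being applied
unconditionally as in the other two helpers; this is a reading of the agreed fix,
not code from a posted revision.]

	struct page *alloc_transhuge_page(gfp_t gfp_mask)
	{
		struct page *page;

		/* __GFP_COMP is forced here, so no need to assert the caller set it */
		page = alloc_pages(gfp_mask | __GFP_COMP, HPAGE_PMD_ORDER);
		if (unlikely(!page))
			return NULL;
		prep_transhuge_page(page);
		return page;
	}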
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 473 bytes --]
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-16 9:19 ` [PATCH 1/2] " changbin.du
` (2 preceding siblings ...)
2017-10-17 11:12 ` Kirill A. Shutemov
@ 2017-10-18 15:54 ` kbuild test robot
2017-10-18 16:01 ` kbuild test robot
4 siblings, 0 replies; 21+ messages in thread
From: kbuild test robot @ 2017-10-18 15:54 UTC (permalink / raw)
To: changbin.du
Cc: kbuild-all, akpm, corbet, hughd, linux-doc, linux-kernel,
linux-mm
[-- Attachment #1: Type: text/plain, Size: 3038 bytes --]
Hi Changbin,
[auto build test ERROR on mmotm/master]
[also build test ERROR on v4.14-rc5 next-20171017]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/changbin-du-intel-com/mm-thp-introduce-dedicated-transparent-huge-page-allocation-interfaces/20171018-230128
base: git://git.cmpxchg.org/linux-mmotm.git master
config: i386-randconfig-x003-201742 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All error/warnings (new ones prefixed by >>):
In file included from mm/shmem.c:70:0:
include/linux/migrate.h: In function 'new_page_nodemask':
include/linux/migrate.h:49:10: error: implicit declaration of function 'alloc_transhuge_page_nodemask' [-Werror=implicit-function-declaration]
return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/migrate.h:49:10: warning: return makes pointer from integer without a cast [-Wint-conversion]
return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
preferred_nid, nodemask);
~~~~~~~~~~~~~~~~~~~~~~~~
mm/shmem.c: In function 'shmem_alloc_hugepage':
>> mm/shmem.c:1448:9: error: implicit declaration of function 'alloc_transhuge_page_vma' [-Werror=implicit-function-declaration]
page = alloc_transhuge_page_vma(gfp, &pvma, 0);
^~~~~~~~~~~~~~~~~~~~~~~~
>> mm/shmem.c:1448:7: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
page = alloc_transhuge_page_vma(gfp, &pvma, 0);
^
cc1: some warnings being treated as errors
vim +/alloc_transhuge_page_vma +1448 mm/shmem.c
1423
1424 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1425 struct shmem_inode_info *info, pgoff_t index)
1426 {
1427 struct vm_area_struct pvma;
1428 struct inode *inode = &info->vfs_inode;
1429 struct address_space *mapping = inode->i_mapping;
1430 pgoff_t idx, hindex;
1431 void __rcu **results;
1432 struct page *page;
1433
1434 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1435 return NULL;
1436
1437 hindex = round_down(index, HPAGE_PMD_NR);
1438 rcu_read_lock();
1439 if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx,
1440 hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
1441 rcu_read_unlock();
1442 return NULL;
1443 }
1444 rcu_read_unlock();
1445
1446 shmem_pseudo_vma_init(&pvma, info, hindex);
1447 gfp |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
> 1448 page = alloc_transhuge_page_vma(gfp, &pvma, 0);
1449 shmem_pseudo_vma_destroy(&pvma);
1450 return page;
1451 }
1452
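[Editor's note: the failing randconfigs have CONFIG_TRANSPARENT_HUGEPAGE disabled,
so the new declarations in huge_mm.h are presumably not visible there. A common
way such reports are resolved (a sketch under that assumption, not part of the
posted patch) is to add !CONFIG_TRANSPARENT_HUGEPAGE stubs so generic callers such
as new_page_nodemask() and the shmem code still compile:]

	#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
	static inline struct page *alloc_transhuge_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
	{
		/* THP disabled: behave like a failed allocation */
		return NULL;
	}

	static inline struct page *alloc_transhuge_page_nodemask(gfp_t gfp_mask,
			int preferred_nid, nodemask_t *nmask)
	{
		return NULL;
	}
	#endif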
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 29387 bytes --]
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [PATCH 1/2] mm, thp: introduce dedicated transparent huge page allocation interfaces
2017-10-16 9:19 ` [PATCH 1/2] " changbin.du
` (3 preceding siblings ...)
2017-10-18 15:54 ` kbuild test robot
@ 2017-10-18 16:01 ` kbuild test robot
4 siblings, 0 replies; 21+ messages in thread
From: kbuild test robot @ 2017-10-18 16:01 UTC (permalink / raw)
To: changbin.du
Cc: kbuild-all, akpm, corbet, hughd, linux-doc, linux-kernel,
linux-mm
[-- Attachment #1: Type: text/plain, Size: 2584 bytes --]
Hi Changbin,
[auto build test ERROR on mmotm/master]
[also build test ERROR on v4.14-rc5 next-20171017]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/changbin-du-intel-com/mm-thp-introduce-dedicated-transparent-huge-page-allocation-interfaces/20171018-230128
base: git://git.cmpxchg.org/linux-mmotm.git master
config: i386-randconfig-x001-201742 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All error/warnings (new ones prefixed by >>):
In file included from include/linux/balloon_compaction.h:48:0,
from drivers/virtio/virtio_balloon.c:29:
include/linux/migrate.h: In function 'new_page_nodemask':
>> include/linux/migrate.h:49:10: error: implicit declaration of function 'alloc_transhuge_page_nodemask' [-Werror=implicit-function-declaration]
return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>> include/linux/migrate.h:49:10: warning: return makes pointer from integer without a cast [-Wint-conversion]
return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
preferred_nid, nodemask);
~~~~~~~~~~~~~~~~~~~~~~~~
cc1: some warnings being treated as errors
vim +/alloc_transhuge_page_nodemask +49 include/linux/migrate.h
33
34 static inline struct page *new_page_nodemask(struct page *page,
35 int preferred_nid, nodemask_t *nodemask)
36 {
37 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
38 unsigned int order = 0;
39 struct page *new_page = NULL;
40
41 if (PageHuge(page))
42 return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
43 preferred_nid, nodemask);
44
45 if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
46 gfp_mask |= __GFP_HIGHMEM;
47
48 if (thp_migration_supported() && PageTransHuge(page))
> 49 return alloc_transhuge_page_nodemask(gfp_mask | GFP_TRANSHUGE,
50 preferred_nid, nodemask);
51 else
52 return __alloc_pages_nodemask(gfp_mask, order,
53 preferred_nid, nodemask);
54
55 return new_page;
56 }
57
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 26934 bytes --]
^ permalink raw reply [flat|nested] 21+ messages in thread