linux-mm.kvack.org archive mirror
From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH v2 02/15] mm: Pass order to __alloc_pages_nodemask in GFP flags
Date: Fri, 10 May 2019 06:50:25 -0700
Message-ID: <20190510135038.17129-3-willy@infradead.org>
In-Reply-To: <20190510135038.17129-1-willy@infradead.org>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Save marshalling an extra argument in all the callers at the expense of
using five bits of the GFP flags.  We still have three GFP bits remaining
after doing this (and we can release one more by reallocating NORETRY,
RETRY_MAYFAIL and NOFAIL).
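
For illustration, here is a standalone userspace sketch of the encoding
(not kernel code: it mirrors the new macros with the __force annotations
dropped, assumes __GFP_BITS_SHIFT is 23, i.e. CONFIG_LOCKDEP disabled,
and uses an arbitrary stand-in value for the low GFP flag bits):

	#include <assert.h>
	#include <stdio.h>

	#define GFP_BITS_SHIFT		23
	#define GFP_ORDER(order)	((unsigned int)(order) << GFP_BITS_SHIFT)
	#define gfp_order(gfp)		((unsigned int)(gfp) >> GFP_BITS_SHIFT)

	int main(void)
	{
		unsigned int flags = 0x6000c0;	/* stand-in for GFP flag bits */
		unsigned int gfp = flags | GFP_ORDER(9); /* order 9: 2MB in 4kB pages */

		assert(gfp_order(gfp) == 9);	/* the order lives in the top bits */
		/* the low GFP_BITS_SHIFT bits are untouched */
		assert((gfp & ((1U << GFP_BITS_SHIFT) - 1)) == flags);
		printf("order = %u\n", gfp_order(gfp));
		return 0;
	}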

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/gfp.h     | 18 +++++++++++++++---
 include/linux/migrate.h |  2 +-
 mm/hugetlb.c            |  5 +++--
 mm/mempolicy.c          |  5 +++--
 mm/page_alloc.c         |  4 ++--
 5 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..c466b08df0ec 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -219,6 +219,18 @@ struct vm_area_struct;
 /* Room for N __GFP_FOO bits */
 #define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+#define __GFP_ORDER(order) ((__force gfp_t)((order) << __GFP_BITS_SHIFT))
+#define __GFP_PMD	__GFP_ORDER(PMD_SHIFT - PAGE_SHIFT)
+#define __GFP_PUD	__GFP_ORDER(PUD_SHIFT - PAGE_SHIFT)
+
+/*
+ * Extract the order from a GFP bitmask.
+ * Must be the top bits to avoid an AND operation.  Don't let
+ * __GFP_BITS_SHIFT get over 27, or we won't be able to encode orders
+ * above 15 (some architectures allow configuring MAX_ORDER up to 64,
+ * but I doubt larger than 31 are ever used).
+ */
+#define gfp_order(gfp)	(((__force unsigned int)(gfp)) >> __GFP_BITS_SHIFT)
 
 /**
  * DOC: Useful GFP flag combinations
@@ -464,13 +476,13 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
 struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
+__alloc_pages_nodemask(gfp_t gfp_mask, int preferred_nid, nodemask_t *nodemask);
 
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
 {
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
+	return __alloc_pages_nodemask(gfp_mask | __GFP_ORDER(order),
+			preferred_nid, NULL);
 }
 
 /*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e13d9bf2f9a5..ba4385144cc9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -50,7 +50,7 @@ static inline struct page *new_page_nodemask(struct page *page,
 	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order,
+	new_page = __alloc_pages_nodemask(gfp_mask | __GFP_ORDER(order),
 				preferred_nid, nodemask);
 
 	if (new_page && PageTransHuge(new_page))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bf58cee30f65..c8ee747ca437 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1409,10 +1409,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 	int order = huge_page_order(h);
 	struct page *page;
 
-	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+	gfp_mask |= __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
+			__GFP_ORDER(order);
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages_nodemask(gfp_mask, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2219e747df49..310ad69effdd 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2093,7 +2093,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order), preferred_nid,
+			nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2129,7 +2130,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order),
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 57373327712e..6e968ab91660 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4622,11 +4622,11 @@ static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask)
+__alloc_pages_nodemask(gfp_t gfp_mask, int preferred_nid, nodemask_t *nodemask)
 {
 	struct page *page;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
+	unsigned int order = gfp_order(gfp_mask);
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
-- 
2.20.1
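
The comment above gfp_order() caps __GFP_BITS_SHIFT at 27 so that at
least five bits remain at the top of the word for the order.  A
hypothetical debug helper (gfp_with_order() is an invented name, not
part of this patch) could assert that a caller's order actually fits:

	static inline gfp_t gfp_with_order(gfp_t gfp, unsigned int order)
	{
		/* the order must fit in the bits above __GFP_BITS_SHIFT */
		VM_WARN_ON_ONCE(order > (UINT_MAX >> __GFP_BITS_SHIFT));
		return gfp | __GFP_ORDER(order);
	}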


Thread overview: 21+ messages
2019-05-10 13:50 [PATCH v2 00/15] Remove 'order' argument from many mm functions Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 01/15] mm: Remove gfp_flags argument from rmqueue_pcplist Matthew Wilcox
2019-05-10 13:50 ` Matthew Wilcox [this message]
2019-05-10 13:50 ` [PATCH v2 03/15] mm: Pass order to __alloc_pages in GFP flags Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 04/15] mm: Pass order to alloc_page_interleave " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 05/15] mm: Pass order to alloc_pages_current " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 06/15] mm: Pass order to alloc_pages_vma " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 07/15] mm: Pass order to __alloc_pages_node " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 08/15] mm: Pass order to __get_free_page " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 09/15] mm: Pass order to prep_new_page " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 10/15] mm: Pass order to rmqueue " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 11/15] mm: Pass order to get_page_from_freelist " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 12/15] mm: Pass order to __alloc_pages_cpuset_fallback " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 13/15] mm: Pass order to prepare_alloc_pages " Matthew Wilcox
2019-05-10 13:50 ` [PATCH v2 14/15] mm: Pass order to try_to_free_pages " Matthew Wilcox
2019-05-10 23:26   ` Ira Weiny
2019-05-10 13:50 ` [PATCH v2 15/15] mm: Pass order to node_reclaim() " Matthew Wilcox
2019-05-10 23:30 ` [PATCH v2 00/15] Remove 'order' argument from many mm functions Ira Weiny
2019-05-13 10:51 ` Michal Hocko
2019-05-13 11:21   ` Matthew Wilcox
2019-05-13 11:42     ` Michal Hocko
