From: Mel Gorman <mel@csn.ul.ie>
To: Mel Gorman <mel@csn.ul.ie>,
Linux Memory Management List <linux-mm@kvack.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
Christoph Lameter <cl@linux-foundation.org>,
Nick Piggin <npiggin@suse.de>,
Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
Lin Ming <ming.m.lin@intel.com>,
Zhang Yanmin <yanmin_zhang@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Andrew Morton <akpm@linux-foundation.org>
Subject: [PATCH 11/25] Calculate the cold parameter for allocation only once
Date: Fri, 20 Mar 2009 10:02:58 +0000 [thread overview]
Message-ID: <1237543392-11797-12-git-send-email-mel@csn.ul.ie> (raw)
In-Reply-To: <1237543392-11797-1-git-send-email-mel@csn.ul.ie>
The GFP mask is checked for whether __GFP_COLD has been specified when deciding
which end of the PCP lists to use. However, this check happens multiple times per
allocation, at least once per zone traversed. Calculate it once.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
---
mm/page_alloc.c | 35 ++++++++++++++++++-----------------
1 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0558eb4..ad26052 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1068,11 +1068,10 @@ void split_page(struct page *page, unsigned int order)
*/
static struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, int order, gfp_t gfp_flags,
- int migratetype)
+ int migratetype, int cold)
{
unsigned long flags;
struct page *page;
- int cold = !!(gfp_flags & __GFP_COLD);
int cpu;
again:
@@ -1399,7 +1398,7 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
- struct zone *preferred_zone, int migratetype)
+ struct zone *preferred_zone, int migratetype, int cold)
{
struct zoneref *z;
struct page *page = NULL;
@@ -1452,7 +1451,7 @@ zonelist_scan:
}
page = buffered_rmqueue(preferred_zone, zone, order,
- gfp_mask, migratetype);
+ gfp_mask, migratetype, cold);
if (page)
break;
this_zone_full:
@@ -1517,7 +1516,7 @@ static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int migratetype, int cold)
{
struct page *page;
@@ -1535,7 +1534,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
order, zonelist, high_zoneidx,
ALLOC_WMARK_HIGH|ALLOC_CPUSET,
- preferred_zone, migratetype);
+ preferred_zone, migratetype, cold);
if (page)
goto out;
@@ -1556,7 +1555,7 @@ static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress)
+ int migratetype, int cold, unsigned long *did_some_progress)
{
struct page *page = NULL;
struct reclaim_state reclaim_state;
@@ -1589,7 +1588,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags, preferred_zone,
- migratetype);
+ migratetype, cold);
return page;
}
@@ -1601,14 +1600,14 @@ static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int migratetype, int cold)
{
struct page *page;
do {
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, migratetype, cold);
if (!page && gfp_mask & __GFP_NOFAIL)
congestion_wait(WRITE, HZ/50);
@@ -1668,7 +1667,7 @@ static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int migratetype, int cold)
{
const gfp_t wait = gfp_mask & __GFP_WAIT;
struct page *page = NULL;
@@ -1701,7 +1700,7 @@ restart:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, migratetype, cold);
if (page)
goto got_pg;
@@ -1709,7 +1708,7 @@ restart:
if (alloc_flags & ALLOC_NO_WATERMARKS) {
page = __alloc_pages_high_priority(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
- preferred_zone, migratetype);
+ preferred_zone, migratetype, cold);
if (page)
goto got_pg;
}
@@ -1727,7 +1726,8 @@ restart:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
- migratetype, &did_some_progress);
+ migratetype, cold,
+ &did_some_progress);
if (page)
goto got_pg;
@@ -1740,7 +1740,7 @@ restart:
page = __alloc_pages_may_oom(gfp_mask, order,
zonelist, high_zoneidx,
nodemask, preferred_zone,
- migratetype);
+ migratetype, cold);
if (page)
goto got_pg;
@@ -1780,6 +1780,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zone *preferred_zone;
struct page *page;
int migratetype = allocflags_to_migratetype(gfp_mask);
+ int cold = gfp_mask & __GFP_COLD;
might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -1803,11 +1804,11 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
- preferred_zone, migratetype);
+ preferred_zone, migratetype, cold);
if (unlikely(!page))
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
- preferred_zone, migratetype);
+ preferred_zone, migratetype, cold);
return page;
}
--
1.5.6.5
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2009-03-20 15:29 UTC|newest]
Thread overview: 51+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-03-20 10:02 [PATCH 00/25] Cleanup and optimise the page allocator V5 Mel Gorman
2009-03-20 10:02 ` [PATCH 01/25] Replace __alloc_pages_internal() with __alloc_pages_nodemask() Mel Gorman
2009-03-20 10:02 ` [PATCH 02/25] Do not sanity check order in the fast path Mel Gorman
2009-03-20 10:02 ` [PATCH 03/25] Do not check NUMA node ID when the caller knows the node is valid Mel Gorman
2009-03-20 10:02 ` [PATCH 04/25] Check only once if the zonelist is suitable for the allocation Mel Gorman
2009-03-20 10:02 ` [PATCH 05/25] Break up the allocator entry point into fast and slow paths Mel Gorman
2009-03-20 10:02 ` [PATCH 06/25] Move check for disabled anti-fragmentation out of fastpath Mel Gorman
2009-03-20 10:02 ` [PATCH 07/25] Check in advance if the zonelist needs additional filtering Mel Gorman
2009-03-20 15:04 ` Christoph Lameter
2009-03-20 10:02 ` [PATCH 08/25] Calculate the preferred zone for allocation only once Mel Gorman
2009-03-20 15:06 ` Christoph Lameter
2009-03-20 15:29 ` Mel Gorman
2009-03-20 10:02 ` [PATCH 09/25] Calculate the migratetype " Mel Gorman
2009-03-20 15:08 ` Christoph Lameter
2009-03-20 10:02 ` [PATCH 10/25] Calculate the alloc_flags " Mel Gorman
2009-03-20 10:02 ` Mel Gorman [this message]
2009-03-20 15:09 ` [PATCH 11/25] Calculate the cold parameter " Christoph Lameter
2009-04-21 15:13 ` Mel Gorman
2009-04-21 15:25 ` Christoph Lameter
2009-04-21 15:47 ` Mel Gorman
2009-03-20 10:02 ` [PATCH 12/25] Remove a branch by assuming __GFP_HIGH == ALLOC_HIGH Mel Gorman
2009-03-20 10:03 ` [PATCH 13/25] Inline __rmqueue_smallest() Mel Gorman
2009-03-20 10:03 ` [PATCH 14/25] Inline buffered_rmqueue() Mel Gorman
2009-03-20 10:03 ` [PATCH 15/25] Inline __rmqueue_fallback() Mel Gorman
2009-03-20 10:03 ` [PATCH 16/25] Save text by reducing call sites of __rmqueue() Mel Gorman
2009-03-20 10:03 ` [PATCH 17/25] Do not call get_pageblock_migratetype() more than necessary Mel Gorman
2009-03-20 10:03 ` [PATCH 18/25] Do not disable interrupts in free_page_mlock() Mel Gorman
2009-03-20 10:03 ` [PATCH 19/25] Do not setup zonelist cache when there is only one node Mel Gorman
2009-03-20 10:03 ` [PATCH 20/25] Do not check for compound pages during the page allocator sanity checks Mel Gorman
2009-03-20 10:03 ` [PATCH 21/25] Use allocation flags as an index to the zone watermark Mel Gorman
2009-03-20 10:03 ` [PATCH 22/25] Update NR_FREE_PAGES only as necessary Mel Gorman
2009-03-20 10:03 ` [PATCH 23/25] Get the pageblock migratetype without disabling interrupts Mel Gorman
2009-03-20 10:03 ` [PATCH 24/25] Re-sort GFP flags and fix whitespace alignment for easier reading Mel Gorman
2009-03-20 10:03 ` [PATCH 25/25] Use a pre-calculated value instead of num_online_nodes() in fast paths Mel Gorman
2009-03-20 15:00 ` [PATCH 00/25] Cleanup and optimise the page allocator V5 Christoph Lameter
2009-03-20 15:37 ` Mel Gorman
2009-03-20 16:04 ` Christoph Lameter
2009-03-20 16:41 ` Mel Gorman
2009-03-20 16:07 ` Christoph Lameter
2009-03-20 16:27 ` Mel Gorman
2009-03-20 19:43 ` Christoph Lameter
2009-03-23 11:52 ` Mel Gorman
2009-03-23 13:30 ` Christoph Lameter
2009-03-23 14:59 ` Mel Gorman
-- strict thread matches above, loose matches on Subject: below --
2009-04-20 22:19 [PATCH 00/25] Cleanup and optimise the page allocator V6 Mel Gorman
2009-04-20 22:19 ` [PATCH 11/25] Calculate the cold parameter for allocation only once Mel Gorman
2009-04-21 7:43 ` Pekka Enberg
2009-04-21 8:41 ` Mel Gorman
2009-04-21 9:07 ` KOSAKI Motohiro
2009-04-21 10:08 ` Mel Gorman
2009-04-21 14:59 ` Christoph Lameter
2009-04-21 14:58 ` Christoph Lameter
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1237543392-11797-12-git-send-email-mel@csn.ul.ie \
--to=mel@csn.ul.ie \
--cc=akpm@linux-foundation.org \
--cc=cl@linux-foundation.org \
--cc=kosaki.motohiro@jp.fujitsu.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=ming.m.lin@intel.com \
--cc=npiggin@suse.de \
--cc=peterz@infradead.org \
--cc=yanmin_zhang@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).