From: Mel Gorman <mgorman@suse.de>
To: Linux-MM <linux-mm@kvack.org>,
Linux-FSDevel <linux-fsdevel@vger.kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Vlastimil Babka <vbabka@suse.cz>, Jan Kara <jack@suse.cz>,
Michal Hocko <mhocko@suse.cz>, Hugh Dickins <hughd@google.com>,
Mel Gorman <mgorman@suse.de>,
Linux Kernel <linux-kernel@vger.kernel.org>
Subject: [PATCH 04/17] mm: page_alloc: Calculate classzone_idx once from the zonelist ref
Date: Thu, 1 May 2014 09:44:35 +0100
Message-ID: <1398933888-4940-5-git-send-email-mgorman@suse.de>
In-Reply-To: <1398933888-4940-1-git-send-email-mgorman@suse.de>
There is no need to calculate zone_idx(preferred_zone) multiple times
or use the pgdat to figure it out.
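
For context, a minimal sketch (not part of the diff below) of the pattern
the patch introduces: the preferred zone's index is taken once from the
zoneref returned by first_zones_zonelist() and then threaded through the
allocator call chain, instead of each callee recomputing
zone_idx(preferred_zone).

	/*
	 * Sketch only: derive classzone_idx once from the zoneref that
	 * first_zones_zonelist() returns and pass it to the allocation
	 * helpers alongside preferred_zone.
	 */
	struct zoneref *preferred_zoneref;
	struct zone *preferred_zone;
	int classzone_idx;

	preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
				nodemask ? : &cpuset_current_mems_allowed,
				&preferred_zone);
	classzone_idx = zonelist_zone_idx(preferred_zoneref);
			/* equivalent to zone_idx(preferred_zone) */

	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
				high_zoneidx, alloc_flags,
				preferred_zone, classzone_idx, migratetype);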
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
mm/page_alloc.c | 55 ++++++++++++++++++++++++++++++++-----------------------
1 file changed, 32 insertions(+), 23 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb12b9a..3b6ae9d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1907,17 +1907,15 @@ static inline void init_zone_allows_reclaim(int nid)
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
- struct zone *preferred_zone, int migratetype)
+ struct zone *preferred_zone, int classzone_idx, int migratetype)
{
struct zoneref *z;
struct page *page = NULL;
- int classzone_idx;
struct zone *zone;
nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
int zlc_active = 0; /* set if using zonelist_cache */
int did_zlc_setup = 0; /* just call zlc_setup() one time */
- classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
/*
* Scan zonelist, looking for a zone with enough free.
@@ -2174,7 +2172,7 @@ static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int classzone_idx, int migratetype)
{
struct page *page;
@@ -2192,7 +2190,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
order, zonelist, high_zoneidx,
ALLOC_WMARK_HIGH|ALLOC_CPUSET,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page)
goto out;
@@ -2227,7 +2225,7 @@ static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, bool sync_migration,
+ int classzone_idx, int migratetype, bool sync_migration,
bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
@@ -2255,7 +2253,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask, nodemask,
order, zonelist, high_zoneidx,
alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page) {
preferred_zone->compact_blockskip_flush = false;
compaction_defer_reset(preferred_zone, order, true);
@@ -2287,7 +2285,7 @@ static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, bool sync_migration,
+ int classzone_idx, int migratetype, bool sync_migration,
bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
@@ -2328,7 +2326,7 @@ static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress)
+ int classzone_idx, int migratetype, unsigned long *did_some_progress)
{
struct page *page = NULL;
bool drained = false;
@@ -2346,7 +2344,8 @@ retry:
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx,
+ migratetype);
/*
* If an allocation failed after direct reclaim, it could be because
@@ -2369,14 +2368,14 @@ static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int classzone_idx, int migratetype)
{
struct page *page;
do {
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (!page && gfp_mask & __GFP_NOFAIL)
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
@@ -2477,7 +2476,7 @@ static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int migratetype)
+ int classzone_idx, int migratetype)
{
const gfp_t wait = gfp_mask & __GFP_WAIT;
struct page *page = NULL;
@@ -2526,15 +2525,19 @@ restart:
* Find the true preferred zone if the allocation is unconstrained by
* cpusets.
*/
- if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
- first_zones_zonelist(zonelist, high_zoneidx, NULL,
- &preferred_zone);
+ if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
+ struct zoneref *preferred_zoneref;
+ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
+ nodemask ? : &cpuset_current_mems_allowed,
+ &preferred_zone);
+ classzone_idx = zonelist_zone_idx(preferred_zoneref);
+ }
rebalance:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page)
goto got_pg;
@@ -2549,7 +2552,7 @@ rebalance:
page = __alloc_pages_high_priority(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (page) {
goto got_pg;
}
@@ -2582,6 +2585,7 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
+ classzone_idx,
migratetype, sync_migration,
&contended_compaction,
&deferred_compaction,
@@ -2605,7 +2609,8 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
- migratetype, &did_some_progress);
+ classzone_idx, migratetype,
+ &did_some_progress);
if (page)
goto got_pg;
@@ -2624,7 +2629,7 @@ rebalance:
page = __alloc_pages_may_oom(gfp_mask, order,
zonelist, high_zoneidx,
nodemask, preferred_zone,
- migratetype);
+ classzone_idx, migratetype);
if (page)
goto got_pg;
@@ -2667,6 +2672,7 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
+ classzone_idx,
migratetype, sync_migration,
&contended_compaction,
&deferred_compaction,
@@ -2694,11 +2700,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
struct zone *preferred_zone;
+ struct zoneref *preferred_zoneref;
struct page *page = NULL;
int migratetype = allocflags_to_migratetype(gfp_mask);
unsigned int cpuset_mems_cookie;
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
struct mem_cgroup *memcg = NULL;
+ int classzone_idx;
gfp_mask &= gfp_allowed_mask;
@@ -2728,11 +2736,12 @@ retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
/* The preferred zone is used for statistics later */
- first_zones_zonelist(zonelist, high_zoneidx,
+ preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
nodemask ? : &cpuset_current_mems_allowed,
&preferred_zone);
if (!preferred_zone)
goto out;
+ classzone_idx = zonelist_zone_idx(preferred_zoneref);
#ifdef CONFIG_CMA
if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
@@ -2742,7 +2751,7 @@ retry:
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
zonelist, high_zoneidx, alloc_flags,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
if (unlikely(!page)) {
/*
* The first pass makes sure allocations are spread
@@ -2768,7 +2777,7 @@ retry:
gfp_mask = memalloc_noio_flags(gfp_mask);
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
- preferred_zone, migratetype);
+ preferred_zone, classzone_idx, migratetype);
}
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
--
1.8.4.5