From: Mel Gorman <mgorman@suse.de>
To: Linux-MM <linux-mm@kvack.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>, Dave Hansen <dave@sr71.net>,
Christoph Lameter <cl@linux.com>,
LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 04/22] mm: page allocator: Only check migratetype of pages being drained while CMA active
Date: Wed, 8 May 2013 17:02:49 +0100 [thread overview]
Message-ID: <1368028987-8369-5-git-send-email-mgorman@suse.de> (raw)
In-Reply-To: <1368028987-8369-1-git-send-email-mgorman@suse.de>
CMA added an is_migrate_isolate_page() check to the bulk page free path which
does a pageblock migratetype lookup for every page being drained. The lookup
is only necessary while memory isolation (such as a CMA allocation) is in
progress, so skip the expensive check in the normal case.
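
To make the fast path explicit: the helper reduces to a cheap zone-level flag
test before the pageblock bitmap walk. The sketch below is equivalent to the
inline helper added in the diff (same names as the patch; comments are
illustrative only):

static inline bool is_migrate_isolate_page(struct zone *zone, struct page *page)
{
	/* Cheap test: the flag is only set once start_isolate_page_range() runs */
	if (!zone->memory_isolation_active)
		return false;

	/* Only pay for the pageblock bitmap lookup while isolation is active */
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}

free_pcppages_bulk() passes in the zone it is already draining under zone->lock,
so the common drain path avoids the per-page pageblock lookup entirely.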
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 include/linux/mmzone.h         |  8 ++++++--
 include/linux/page-isolation.h |  7 ++++---
 mm/page_alloc.c                |  2 +-
 mm/page_isolation.c            | 27 +++++++++++++++++++++++----
 4 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e71e3a6..57f03b3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -354,12 +354,16 @@ struct zone {
 	spinlock_t		lock;
 	int			all_unreclaimable; /* All pages pinned */
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
+	unsigned long		compact_cached_free_pfn;
+	unsigned long		compact_cached_migrate_pfn;
+
 	/* Set to true when the PG_migrate_skip bits should be cleared */
 	bool			compact_blockskip_flush;
 
 	/* pfns where compaction scanners should start */
-	unsigned long		compact_cached_free_pfn;
-	unsigned long		compact_cached_migrate_pfn;
+#endif
+#ifdef CONFIG_MEMORY_ISOLATION
+	bool			memory_isolation_active;
 #endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3fff8e7..81287bb 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,16 +2,17 @@
 #define __LINUX_PAGEISOLATION_H
 
 #ifdef CONFIG_MEMORY_ISOLATION
-static inline bool is_migrate_isolate_page(struct page *page)
+static inline bool is_migrate_isolate_page(struct zone *zone, struct page *page)
 {
-	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
+	return zone->memory_isolation_active &&
+		get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
 }
 static inline bool is_migrate_isolate(int migratetype)
 {
 	return migratetype == MIGRATE_ISOLATE;
 }
 #else
-static inline bool is_migrate_isolate_page(struct page *page)
+static inline bool is_migrate_isolate_page(struct zone *zone, struct page *page)
 {
 	return false;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4a07771..f170260 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -674,7 +674,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, zone, 0, mt);
 			trace_mm_page_pcpu_drain(page, 0, mt);
-			if (likely(!is_migrate_isolate_page(page))) {
+			if (likely(!is_migrate_isolate_page(zone, page))) {
 				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
 				if (is_migrate_cma(mt))
 					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 383bdbb..9f0c068 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -118,6 +118,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long pfn;
 	unsigned long undo_pfn;
 	struct page *page;
+	struct zone *zone = NULL;
+	unsigned long flags;
 
 	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
 	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
@@ -126,12 +128,20 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < end_pfn;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page &&
-		    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
-			undo_pfn = pfn;
-			goto undo;
+		if (page) {
+			if (!zone)
+				zone = page_zone(page);
+			if (set_migratetype_isolate(page,
+					skip_hwpoisoned_pages)) {
+				undo_pfn = pfn;
+				goto undo;
+			}
 		}
 	}
+
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->memory_isolation_active = true;
+	spin_unlock_irqrestore(&zone->lock, flags);
 	return 0;
 undo:
 	for (pfn = start_pfn;
@@ -150,6 +160,9 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 {
 	unsigned long pfn;
 	struct page *page;
+	struct zone *zone = NULL;
+	unsigned long flags;
+
 	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
 	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
 	for (pfn = start_pfn;
@@ -159,7 +172,13 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
 			continue;
 		unset_migratetype_isolate(page, migratetype);
+		if (!zone)
+			zone = page_zone(page);
 	}
+
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->memory_isolation_active = true;
+	spin_unlock_irqrestore(&zone->lock, flags);
 	return 0;
 }
 /*
--
1.8.1.4