From: Mel Gorman <mgorman@suse.de>
To: Linux-MM <linux-mm@kvack.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>, Dave Hansen <dave@sr71.net>,
Christoph Lameter <cl@linux.com>,
LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 17/22] mm: page allocator: Move magazine access behind accessors
Date: Wed, 8 May 2013 17:03:02 +0100
Message-ID: <1368028987-8369-18-git-send-email-mgorman@suse.de>
In-Reply-To: <1368028987-8369-1-git-send-email-mgorman@suse.de>

In preparation for splitting the magazines in a later patch, move all
magazine and magazine-lock accesses behind accessor functions. The
fields are renamed with a leading underscore to make it clear that they
should not be accessed directly.
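
For reference, the locking convention the accessors establish looks
roughly like this. This is an illustrative sketch distilled from the
hunks below, not extra patch code; zone, page and migratetype are
assumed from the surrounding callers:

	struct free_area_magazine *area;
	struct page *page = NULL;

	/* Alloc side: take the lock only if the magazine may have pages */
	area = find_lock_filled_magazine(zone);
	if (area) {
		page = __rmqueue_magazine(area, migratetype);
		unlock_magazine(area);
	}

	/* Free side: always take the lock; magazine_drain() releases it */
	area = find_lock_magazine(zone);
	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
	magazine_drain(zone, area, migratetype);
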
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 include/linux/mmzone.h |  4 ++--
 mm/page_alloc.c        | 57 +++++++++++++++++++++++++++++++++-----------------
 mm/vmstat.c            |  5 +----
 3 files changed, 41 insertions(+), 25 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca04853..4eb5151 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -370,8 +370,8 @@ struct zone {
* Keep some order-0 pages on a separate free list
* protected by an irq-unsafe lock
*/
- spinlock_t magazine_lock;
- struct free_area_magazine noirq_magazine;
+ spinlock_t _magazine_lock;
+ struct free_area_magazine _noirq_magazine;
#ifndef CONFIG_SPARSEMEM
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6760e00..36ffff0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1074,11 +1074,33 @@ void mark_free_pages(struct zone *zone)
#define MAGAZINE_ALLOC_BATCH (384)
#define MAGAZINE_FREE_BATCH (64)
+static inline struct free_area_magazine *find_lock_magazine(struct zone *zone)
+{
+ struct free_area_magazine *area = &zone->_noirq_magazine;
+ spin_lock(&zone->_magazine_lock);
+ return area;
+}
+
+static inline struct free_area_magazine *find_lock_filled_magazine(struct zone *zone)
+{
+ struct free_area_magazine *area = &zone->_noirq_magazine;
+ if (!area->nr_free)
+ return NULL;
+ spin_lock(&zone->_magazine_lock);
+ return area;
+}
+
+static inline void unlock_magazine(struct free_area_magazine *area)
+{
+ struct zone *zone = container_of(area, struct zone, _noirq_magazine);
+ spin_unlock(&zone->_magazine_lock);
+}
+
static
-struct page *__rmqueue_magazine(struct zone *zone, int migratetype)
+struct page *__rmqueue_magazine(struct free_area_magazine *area,
+ int migratetype)
{
struct page *page;
- struct free_area_magazine *area = &(zone->noirq_magazine);
if (list_empty(&area->free_list[migratetype]))
return NULL;
@@ -1092,9 +1114,9 @@ struct page *__rmqueue_magazine(struct zone *zone, int migratetype)
return page;
}
-static void magazine_drain(struct zone *zone, int migratetype)
+static void magazine_drain(struct zone *zone, struct free_area_magazine *area,
+ int migratetype)
{
- struct free_area_magazine *area = &(zone->noirq_magazine);
struct list_head *list;
struct page *page;
unsigned int batch_free = 0;
@@ -1104,7 +1126,7 @@ static void magazine_drain(struct zone *zone, int migratetype)
LIST_HEAD(free_list);
if (area->nr_free < MAGAZINE_LIMIT) {
- spin_unlock(&zone->magazine_lock);
+ unlock_magazine(area);
return;
}
@@ -1139,7 +1161,7 @@ static void magazine_drain(struct zone *zone, int migratetype)
}
/* Free the list of pages to the buddy allocator */
- spin_unlock(&zone->magazine_lock);
+ unlock_magazine(area);
spin_lock_irqsave(&zone->lock, flags);
while (!list_empty(&free_list)) {
page = list_entry(free_list.prev, struct page, lru);
@@ -1188,13 +1210,12 @@ void free_base_page(struct page *page)
}
/* Put the free page on the magazine list */
- spin_lock(&zone->magazine_lock);
- area = &(zone->noirq_magazine);
+ area = find_lock_magazine(zone);
list_add(&page->lru, &area->free_list[migratetype]);
area->nr_free++;
/* Drain the magazine if necessary, releases the magazine lock */
- magazine_drain(zone, migratetype);
+ magazine_drain(zone, area, migratetype);
}
/* Free a list of 0-order pages */
@@ -1307,20 +1328,18 @@ static
struct page *rmqueue_magazine(struct zone *zone, int migratetype)
{
struct page *page = NULL;
+ struct free_area_magazine *area = find_lock_filled_magazine(zone);
- /* Only acquire the lock if there is a reasonable chance of success */
- if (zone->noirq_magazine.nr_free) {
- spin_lock(&zone->magazine_lock);
retry:
- page = __rmqueue_magazine(zone, migratetype);
- spin_unlock(&zone->magazine_lock);
+ if (area) {
+ page = __rmqueue_magazine(area, migratetype);
+ unlock_magazine(area);
}
/* Try refilling the magazine on allocation failure */
if (!page) {
LIST_HEAD(alloc_list);
unsigned long flags;
- struct free_area_magazine *area = &(zone->noirq_magazine);
unsigned int i;
unsigned int nr_alloced = 0;
@@ -1340,7 +1359,7 @@ retry:
if (!nr_alloced)
return NULL;
- spin_lock(&zone->magazine_lock);
+ area = find_lock_magazine(zone);
list_splice(&alloc_list, &area->free_list[migratetype]);
area->nr_free += nr_alloced;
goto retry;
@@ -3782,8 +3801,8 @@ static void __meminit zone_init_free_lists(struct zone *zone)
for_each_migratetype_order(order, t) {
INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
zone->free_area[order].nr_free = 0;
- INIT_LIST_HEAD(&zone->noirq_magazine.free_list[t]);
- zone->noirq_magazine.nr_free = 0;
+ INIT_LIST_HEAD(&zone->_noirq_magazine.free_list[t]);
+ zone->_noirq_magazine.nr_free = 0;
}
}
@@ -4333,7 +4352,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone->name = zone_names[j];
spin_lock_init(&zone->lock);
spin_lock_init(&zone->lru_lock);
- spin_lock_init(&zone->magazine_lock);
+ spin_lock_init(&zone->_magazine_lock);
zone_seqlock_init(zone);
zone->zone_pgdat = pgdat;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7274ca5..3db0d52 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1003,15 +1003,12 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
")"
"\n noirq magazine");
seq_printf(m,
- "\n cpu: %i"
"\n count: %lu",
- i,
- zone->noirq_magazine.nr_free);
+ zone->_noirq_magazine.nr_free);
#ifdef CONFIG_SMP
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
-
pageset = per_cpu_ptr(zone->pageset, i);
seq_printf(m, "\n pagesets\n vm stats threshold: %d",
pageset->stat_threshold);
--
1.8.1.4